path: root/drivers/block/xen-blkfront.c
/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

struct blk_shadow {
	struct blkif_request req;
	struct request *request;
	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;

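/* Number of requests that can be outstanding on the single-page shared ring. */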
#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	spinlock_t io_lock;
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	int ring_ref;
	struct blkif_front_ring ring;
	struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int evtchn, irq;
	struct request_queue *rq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_RING_SIZE];
	unsigned long shadow_free;
	unsigned int feature_flush;
	unsigned int flush_op;
	unsigned int feature_discard:1;
	unsigned int feature_secdiscard:1;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	int is_ready;
};

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK      256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET (0)
#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)

#define DEV_NAME	"xvd"	/* name in /dev */

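/*
 * The shadow entries double as a free list: the id field of an unused
 * entry holds the index of the next free entry, and shadow_free points
 * at the head of the list.
 */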
static int get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free >= BLK_RING_SIZE);
	info->shadow_free = info->shadow[free].req.u.rw.id;
	info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
	return free;
}

static void add_id_to_freelist(struct blkfront_info *info,
			       unsigned long id)
{
	info->shadow[id].req.u.rw.id  = info->shadow_free;
	info->shadow[id].request = NULL;
	info->shadow_free = id;
}

static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		bitmap_set(minors, minor, nr);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	bitmap_clear(minors,  minor, nr);
	spin_unlock(&minor_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

/*
 * Generate a Xen blkfront IO request from a blk layer request.  Reads
 * and writes are handled as expected.
 *
 * @req: a request struct
 *
 * Returns 0 if the request was placed on the ring, or 1 if it could not
 * be queued yet (not connected, or no grant references free) and should
 * be retried later.
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	unsigned long buffer_mfn;
	struct blkif_request *ring_req;
	unsigned long id;
	unsigned int fsect, lsect;
	int i, ref;
	grant_ref_t gref_head;
	struct scatterlist *sg;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (gnttab_alloc_grant_references(
		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		gnttab_request_free_callback(
			&info->callback,
			blkif_restart_queue_callback,
			info,
			BLKIF_MAX_SEGMENTS_PER_REQUEST);
		return 1;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	info->shadow[id].request = req;

	ring_req->u.rw.id = id;
	ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
	ring_req->u.rw.handle = info->handle;

	ring_req->operation = rq_data_dir(req) ?
		BLKIF_OP_WRITE : BLKIF_OP_READ;

	if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
		/*
		 * Ideally we can do an unordered flush-to-disk.  If the
		 * backend only supports barriers, use that instead.  A
		 * barrier request is a superset of FUA, so we can
		 * implement it the same way.  (It's also a FLUSH+FUA,
		 * since it is guaranteed ordered WRT previous writes.)
		 */
		ring_req->operation = info->flush_op;
	}

	if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
		/* id, sector_number and handle are set above. */
		ring_req->operation = BLKIF_OP_DISCARD;
		ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
		if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
			ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
		else
			ring_req->u.discard.flag = 0;
	} else {
		ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
							   info->sg);
		BUG_ON(ring_req->u.rw.nr_segments >
		       BLKIF_MAX_SEGMENTS_PER_REQUEST);

		for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
			buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
			fsect = sg->offset >> 9;
			lsect = fsect + (sg->length >> 9) - 1;
			/* install a grant reference. */
			ref = gnttab_claim_grant_reference(&gref_head);
			BUG_ON(ref == -ENOSPC);

			gnttab_grant_foreign_access_ref(
					ref,
					info->xbdev->otherend_id,
					buffer_mfn,
					rq_data_dir(req));

			info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
			ring_req->u.rw.seg[i] =
					(struct blkif_request_segment) {
						.gref       = ref,
						.first_sect = fsect,
						.last_sect  = lsect };
		}
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	gnttab_free_grant_references(gref_head);

	return 0;
}


static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}

/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (RING_FULL(&info->ring))
			goto wait;

		blk_start_request(req);

		if ((req->cmd_type != REQ_TYPE_FS) ||
		    ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) &&
		    !info->flush_op)) {
			__blk_end_request_all(req, -EIO);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 req->buffer, rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}

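/* Allocate the request queue and apply limits matching the blkif ring and device. */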
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
	struct request_queue *rq;
	struct blkfront_info *info = gd->private_data;

	rq = blk_init_queue(do_blkif_request, &info->io_lock);
	if (rq == NULL)
		return -1;

	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

	if (info->feature_discard) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
		blk_queue_max_discard_sectors(rq, get_capacity(gd));
		rq->limits.discard_granularity = info->discard_granularity;
		rq->limits.discard_alignment = info->discard_alignment;
		if (info->feature_secdiscard)
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
	}

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, sector_size);
	blk_queue_max_hw_sectors(rq, 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	/* Make sure we don't use bounce buffers. */
	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);

	gd->queue = rq;

	return 0;
}


static void xlvbd_flush(struct blkfront_info *info)
{
	blk_queue_flush(info->rq, info->feature_flush);
	printk(KERN_INFO "blkfront: %s: %s: %s\n",
	       info->gd->disk_name,
	       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
		"barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ?
		"flush diskcache" : "barrier or flush"),
	       info->feature_flush ? "enabled" : "disabled");
}

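/*
 * Map an emulated IDE/SCSI device number onto the xvd minor space and
 * compute the index used to build the disk name.
 */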
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
{
	int major;
	major = BLKIF_MAJOR(vdevice);
	*minor = BLKIF_MINOR(vdevice);
	switch (major) {
		case XEN_IDE0_MAJOR:
			*offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
			*minor = ((*minor / 64) * PARTS_PER_DISK) +
				EMULATED_HD_DISK_MINOR_OFFSET;
			break;
		case XEN_IDE1_MAJOR:
			*offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
			*minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
				EMULATED_HD_DISK_MINOR_OFFSET;
			break;
		case XEN_SCSI_DISK0_MAJOR:
			*offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
			*minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
			break;
		case XEN_SCSI_DISK1_MAJOR:
		case XEN_SCSI_DISK2_MAJOR:
		case XEN_SCSI_DISK3_MAJOR:
		case XEN_SCSI_DISK4_MAJOR:
		case XEN_SCSI_DISK5_MAJOR:
		case XEN_SCSI_DISK6_MAJOR:
		case XEN_SCSI_DISK7_MAJOR:
			*offset = (*minor / PARTS_PER_DISK) + 
				((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
				EMULATED_SD_DISK_NAME_OFFSET;
			*minor = *minor +
				((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
				EMULATED_SD_DISK_MINOR_OFFSET;
			break;
		case XEN_SCSI_DISK8_MAJOR:
		case XEN_SCSI_DISK9_MAJOR:
		case XEN_SCSI_DISK10_MAJOR:
		case XEN_SCSI_DISK11_MAJOR:
		case XEN_SCSI_DISK12_MAJOR:
		case XEN_SCSI_DISK13_MAJOR:
		case XEN_SCSI_DISK14_MAJOR:
		case XEN_SCSI_DISK15_MAJOR:
			*offset = (*minor / PARTS_PER_DISK) + 
				((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
				EMULATED_SD_DISK_NAME_OFFSET;
			*minor = *minor +
				((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
				EMULATED_SD_DISK_MINOR_OFFSET;
			break;
		case XENVBD_MAJOR:
			*offset = *minor / PARTS_PER_DISK;
			break;
		default:
			printk(KERN_WARNING "blkfront: your disk configuration is "
					"incorrect, please use an xvd device instead\n");
			return -ENODEV;
	}
	return 0;
}

static char *encode_disk_name(char *ptr, unsigned int n)
{
	if (n >= 26)
		ptr = encode_disk_name(ptr, n / 26 - 1);
	*ptr = 'a' + n % 26;
	return ptr + 1;
}

static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err;
	unsigned int offset;
	int minor;
	int nr_parts;
	char *ptr;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		err = xen_translate_vdev(info->vdevice, &minor, &offset);
		if (err)
			return err;
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
		offset = minor / nr_parts;
		if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
			printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
					"emulated IDE disks,\n\t choose an xvd device name "
					"from xvde on\n", info->vdevice);
	}
	if (minor >> MINORBITS) {
		pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
			info->vdevice, minor);
		return -ENODEV;
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	strcpy(gd->disk_name, DEV_NAME);
	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
	BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
	if (nr_minors > 1)
		*ptr = 0;
	else
		snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
			 "%d", minor & (nr_parts - 1));

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	gd->driverfs_dev = &(info->xbdev->dev);
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size)) {
		del_gendisk(gd);
		goto release;
	}

	info->rq = gd->queue;
	info->gd = gd;

	xlvbd_flush(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

 release:
	xlbd_release_minors(minor, nr_minors);
 out:
	return err;
}

static void xlvbd_release_gendisk(struct blkfront_info *info)
{
	unsigned int minor, nr_minors;
	unsigned long flags;

	if (info->rq == NULL)
		return;

	spin_lock_irqsave(&info->io_lock, flags);

	/* No more blkif_request(). */
	blk_stop_queue(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irqrestore(&info->io_lock, flags);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work_sync(&info->work);

	del_gendisk(info->gd);

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	xlbd_release_minors(minor, nr_minors);

	blk_cleanup_queue(info->rq);
	info->rq = NULL;

	put_disk(info->gd);
	info->gd = NULL;
}

static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_info *info = container_of(work, struct blkfront_info, work);

	spin_lock_irq(&info->io_lock);
	if (info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(info);
	spin_unlock_irq(&info->io_lock);
}

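/*
 * Quiesce the request queue and tear down the shared ring and event
 * channel; used on disconnect, suspend and in error paths.
 */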
static void blkif_free(struct blkfront_info *info, int suspend)
{
	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&info->io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_stop_queue(info->rq);
	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irq(&info->io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work_sync(&info->work);

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref, 0,
					  (unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;

}

static void blkif_completion(struct blk_shadow *s)
{
	int i;
	/* Must not be used for BLKIF_OP_DISCARD requests, as nr_segments
	 * shares its location with the discard flag. */
	for (i = 0; i < s->req.u.rw.nr_segments; i++)
		gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
}

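/*
 * Interrupt handler: consume responses from the shared ring, complete the
 * corresponding block layer requests and restart the queue.
 */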
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;
	int error;

	spin_lock_irqsave(&info->io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&info->io_lock, flags);
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id   = bret->id;
		req  = info->shadow[id].request;

		if (bret->operation != BLKIF_OP_DISCARD)
			blkif_completion(&info->shadow[id]);

		add_id_to_freelist(info, id);

		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
		switch (bret->operation) {
		case BLKIF_OP_DISCARD:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				struct request_queue *rq = info->rq;
				printk(KERN_WARNING "blkfront: %s: discard op failed\n",
					   info->gd->disk_name);
				error = -EOPNOTSUPP;
				info->feature_discard = 0;
				info->feature_secdiscard = 0;
				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
				queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
			}
			__blk_end_request_all(req, error);
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
				       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
				       "barrier" :  "flush disk cache",
				       info->gd->disk_name);
				error = -EOPNOTSUPP;
			}
			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
				     info->shadow[id].req.u.rw.nr_segments == 0)) {
				printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
				       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
				       "barrier" :  "flush disk cache",
				       info->gd->disk_name);
				error = -EOPNOTSUPP;
			}
			if (unlikely(error)) {
				if (error == -EOPNOTSUPP)
					error = 0;
				info->feature_flush = 0;
				info->flush_op = 0;
				xlvbd_flush(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			__blk_end_request_all(req, error);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&info->io_lock, flags);

	return IRQ_HANDLED;
}


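/*
 * Allocate the shared ring page, grant the backend access to it, and set
 * up the event channel and bind its interrupt handler.
 */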
static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_info *info)
{
	struct blkif_sring *sring;
	int err;

	info->ring_ref = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
	if (err < 0) {
		free_page((unsigned long)sring);
		info->ring.sring = NULL;
		goto fail;
	}
	info->ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn,
					blkif_interrupt,
					IRQF_SAMPLE_RANDOM, "blkif", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}


/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	err = xenbus_printf(xbt, dev->nodename,
			    "ring-ref", "%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);
 out:
	return err;
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice, i;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		/* go looking in the extended area instead */
		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
				   "%i", &vdevice);
		if (err != 1) {
			xenbus_dev_fatal(dev, err, "reading virtual-device");
			return err;
		}
	}

	if (xen_hvm_domain()) {
		char *type;
		int len;
		/* no unplug has been done: do not hook devices != xen vbds */
		if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
			int major;

			if (!VDEV_IS_EXTENDED(vdevice))
				major = BLKIF_MAJOR(vdevice);
			else
				major = XENVBD_MAJOR;

			if (major != XENVBD_MAJOR) {
				printk(KERN_INFO
						"%s: HVM does not support vbd %d as xen block device\n",
						__func__, vdevice);
				return -ENODEV;
			}
		}
		/* do not create a PV cdrom device if we are an HVM guest */
		type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
		if (IS_ERR(type))
			return -ENODEV;
		if (strncmp(type, "cdrom", 5) == 0) {
			kfree(type);
			return -ENODEV;
		}
		kfree(type);
	}
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	mutex_init(&info->mutex);
	spin_lock_init(&info->io_lock);
	info->xbdev = dev;
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;
	INIT_WORK(&info->work, blkif_restart_queue);

	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.u.rw.id = i+1;
	info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev_set_drvdata(&dev->dev, info);

	err = talk_to_blkback(dev, info);
	if (err) {
		kfree(info);
		dev_set_drvdata(&dev->dev, NULL);
		return err;
	}

	return 0;
}


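/*
 * Requeue any requests that were in flight when the connection was lost
 * (e.g. across suspend/resume), re-granting their pages to the new backend.
 */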
static int blkif_recover(struct blkfront_info *info)
{
	int i;
	struct blkif_request *req;
	struct blk_shadow *copy;
	int j;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmalloc(sizeof(info->shadow),
		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, info->shadow, sizeof(info->shadow));

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.u.rw.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;

	/* Stage 3: Find pending requests and requeue them. */
	for (i = 0; i < BLK_RING_SIZE; i++) {
		/* Not in use? */
		if (!copy[i].request)
			continue;

		/* Grab a request slot and copy shadow state into it. */
		req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
		*req = copy[i].req;

		/* We get a new request id, and must reset the shadow state. */
		req->u.rw.id = get_id_from_freelist(info);
		memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i]));

		if (req->operation != BLKIF_OP_DISCARD) {
		/* Rewrite any grant references invalidated by susp/resume. */
			for (j = 0; j < req->u.rw.nr_segments; j++)
				gnttab_grant_foreign_access_ref(
					req->u.rw.seg[j].gref,
					info->xbdev->otherend_id,
					pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
					rq_data_dir(info->shadow[req->u.rw.id].request));
		}
		info->shadow[req->u.rw.id].req = *req;

		info->ring.req_prod_pvt++;
	}

	kfree(copy);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	spin_lock_irq(&info->io_lock);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	/* Send off requeued requests */
	flush_requests(info);

	/* Kick any other new requests queued since we resumed */
	kick_pending_request_queues(info);

	spin_unlock_irq(&info->io_lock);

	return 0;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	int err;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_blkback(dev, info);
	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
		err = blkif_recover(info);

	return err;
}

static void
blkfront_closing(struct blkfront_info *info)
{
	struct xenbus_device *xbdev = info->xbdev;
	struct block_device *bdev = NULL;

	mutex_lock(&info->mutex);

	if (xbdev->state == XenbusStateClosing) {
		mutex_unlock(&info->mutex);
		return;
	}

	if (info->gd)
		bdev = bdget_disk(info->gd, 0);

	mutex_unlock(&info->mutex);

	if (!bdev) {
		xenbus_frontend_closed(xbdev);
		return;
	}

	mutex_lock(&bdev->bd_mutex);

	if (bdev->bd_openers) {
		xenbus_dev_error(xbdev, -EBUSY,
				 "Device in use; refusing to close");
		xenbus_switch_state(xbdev, XenbusStateClosing);
	} else {
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(xbdev);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
}

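/* Read the backend's discard capabilities from xenstore and record them. */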
static void blkfront_setup_discard(struct blkfront_info *info)
{
	int err;
	char *type;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int discard_secure;

	type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL);
	if (IS_ERR(type))
		return;

	info->feature_secdiscard = 0;
	if (strncmp(type, "phy", 3) == 0) {
		err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			"discard-granularity", "%u", &discard_granularity,
			"discard-alignment", "%u", &discard_alignment,
			NULL);
		if (!err) {
			info->feature_discard = 1;
			info->discard_granularity = discard_granularity;
			info->discard_alignment = discard_alignment;
		}
		err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "discard-secure", "%d", &discard_secure,
			    NULL);
		if (!err)
			info->feature_secdiscard = discard_secure;

	} else if (strncmp(type, "file", 4) == 0)
		info->feature_discard = 1;

	kfree(type);
}

/*
 * Invoked when the backend is finally 'ready' (and has produced
 * the details about the physical device - #sectors, size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int binfo;
	int err;
	int barrier, flush, discard;

	switch (info->connected) {
	case BLKIF_STATE_CONNECTED:
		/*
		 * Potentially, the back-end may be signalling
		 * a capacity change; update the capacity.
		 */
		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				   "sectors", "%Lu", &sectors);
		if (XENBUS_EXIST_ERR(err))
			return;
		printk(KERN_INFO "Setting capacity to %Lu\n",
		       sectors);
		set_capacity(info->gd, sectors);
		revalidate_disk(info->gd);

		/* fall through */
	case BLKIF_STATE_SUSPENDED:
		return;

	default:
		break;
	}

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	info->feature_flush = 0;
	info->flush_op = 0;

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-barrier", "%d", &barrier,
			    NULL);

	/*
	 * If there's no "feature-barrier" defined, then it means
	 * we're dealing with a very old backend which writes
	 * synchronously; nothing to do.
	 *
	 * If there are barriers, then we use flush.
	 */
	if (!err && barrier) {
		info->feature_flush = REQ_FLUSH | REQ_FUA;
		info->flush_op = BLKIF_OP_WRITE_BARRIER;
	}
	/*
	 * And if the backend advertises "feature-flush-cache", use that in
	 * preference to barriers.
	 */
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-flush-cache", "%d", &flush,
			    NULL);

	if (!err && flush) {
		info->feature_flush = REQ_FLUSH;
		info->flush_op = BLKIF_OP_FLUSH_DISKCACHE;
	}

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-discard", "%d", &discard,
			    NULL);

	if (!err && discard)
		blkfront_setup_discard(info);

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		return;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	spin_lock_irq(&info->io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	kick_pending_request_queues(info);
	spin_unlock_irq(&info->io_lock);

	add_disk(info->gd);

	info->is_ready = 1;
}

/**
 * Callback received when the backend's state changes.
 */
static void blkback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateConnected:
		blkfront_connect(info);
		break;

	case XenbusStateClosing:
		blkfront_closing(info);
		break;
	}
}

static int blkfront_remove(struct xenbus_device *xbdev)
{
	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
	struct block_device *bdev = NULL;
	struct gendisk *disk;

	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);

	blkif_free(info, 0);

	mutex_lock(&info->mutex);

	disk = info->gd;
	if (disk)
		bdev = bdget_disk(disk, 0);

	info->xbdev = NULL;
	mutex_unlock(&info->mutex);

	if (!bdev) {
		kfree(info);
		return 0;
	}

	/*
	 * The xbdev was removed before we reached the Closed
	 * state. See if it's safe to remove the disk. If the bdev
	 * isn't closed yet, we let release take care of it.
	 */

	mutex_lock(&bdev->bd_mutex);
	info = disk->private_data;

	dev_warn(disk_to_dev(disk),
		 "%s was hot-unplugged, %d stale handles\n",
		 xbdev->nodename, bdev->bd_openers);

	if (info && !bdev->bd_openers) {
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);

	return 0;
}

static int blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	return info->is_ready && info->xbdev;
}

static int blkif_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blkfront_info *info;
	int err = 0;

	mutex_lock(&blkfront_mutex);

	info = disk->private_data;
	if (!info) {
		/* xbdev gone */
		err = -ERESTARTSYS;
		goto out;
	}

	mutex_lock(&info->mutex);

	if (!info->gd)
		/* xbdev is closed */
		err = -ERESTARTSYS;

	mutex_unlock(&info->mutex);

out:
	mutex_unlock(&blkfront_mutex);
	return err;
}

static int blkif_release(struct gendisk *disk, fmode_t mode)
{
	struct blkfront_info *info = disk->private_data;
	struct block_device *bdev;
	struct xenbus_device *xbdev;

	mutex_lock(&blkfront_mutex);

	bdev = bdget_disk(disk, 0);

	if (bdev->bd_openers)
		goto out;

	/*
	 * Check if we have been instructed to close. We will have
	 * deferred this request, because the bdev was still open.
	 */

	mutex_lock(&info->mutex);
	xbdev = info->xbdev;

	if (xbdev && xbdev->state == XenbusStateClosing) {
		/* pending switch to state closed */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(info->xbdev);
 	}

	mutex_unlock(&info->mutex);

	if (!xbdev) {
		/* sudden device removal */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

out:
	bdput(bdev);
	mutex_unlock(&blkfront_mutex);
	return 0;
}

static const struct block_device_operations xlvbd_block_fops =
{
	.owner = THIS_MODULE,
	.open = blkif_open,
	.release = blkif_release,
	.getgeo = blkif_getgeo,
	.ioctl = blkif_ioctl,
};


static const struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};

static DEFINE_XENBUS_DRIVER(blkfront, ,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = blkback_changed,
	.is_ready = blkfront_is_ready,
);

static int __init xlblk_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	if (xen_hvm_domain() && !xen_platform_pci_unplug)
		return -ENODEV;

	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
		       XENVBD_MAJOR, DEV_NAME);
		return -ENODEV;
	}

	ret = xenbus_register_frontend(&blkfront_driver);
	if (ret) {
		unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
		return ret;
	}

	return 0;
}
module_init(xlblk_init);


static void __exit xlblk_exit(void)
{
	xenbus_unregister_driver(&blkfront_driver);
	unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
	kfree(minors);
}
module_exit(xlblk_exit);

MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
MODULE_ALIAS("xen:vbd");
MODULE_ALIAS("xenblk");
g as MAC_ACCESS_REQ stays 1), * but they do not indicate that embedded SRAM is restored yet; * 3945 and 4965 have volatile SRAM, and must save/restore contents * to/from host DRAM when sleeping/waking for power-saving. * Each direction takes approximately 1/4 millisecond; with this * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a * series of register accesses are expected (e.g. reading Event Log), * to keep device from sleeping. * * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that * SRAM is okay/restored. We don't check that here because this call * is just for hardware register access; but GP1 MAC_SLEEP check is a * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). * */ ret = _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); if (unlikely(ret < 0)) { val = _il_rd(il, CSR_GP_CNTRL); WARN_ONCE(1, "Timeout waiting for ucode processor access " "(CSR_GP_CNTRL 0x%08x)\n", val); _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); return false; } return true; } EXPORT_SYMBOL_GPL(_il_grab_nic_access); int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout) { const int interval = 10; /* microseconds */ int t = 0; do { if ((il_rd(il, addr) & mask) == mask) return t; udelay(interval); t += interval; } while (t < timeout); return -ETIMEDOUT; } EXPORT_SYMBOL(il_poll_bit); u32 il_rd_prph(struct il_priv *il, u32 reg) { unsigned long reg_flags; u32 val; spin_lock_irqsave(&il->reg_lock, reg_flags); _il_grab_nic_access(il); val = _il_rd_prph(il, reg); _il_release_nic_access(il); spin_unlock_irqrestore(&il->reg_lock, reg_flags); return val; } EXPORT_SYMBOL(il_rd_prph); void il_wr_prph(struct il_priv *il, u32 addr, u32 val) { unsigned long reg_flags; spin_lock_irqsave(&il->reg_lock, reg_flags); if (likely(_il_grab_nic_access(il))) { _il_wr_prph(il, addr, val); _il_release_nic_access(il); } spin_unlock_irqrestore(&il->reg_lock, reg_flags); } EXPORT_SYMBOL(il_wr_prph); u32 il_read_targ_mem(struct il_priv *il, u32 addr) { unsigned long reg_flags; u32 value; spin_lock_irqsave(&il->reg_lock, reg_flags); _il_grab_nic_access(il); _il_wr(il, HBUS_TARG_MEM_RADDR, addr); value = _il_rd(il, HBUS_TARG_MEM_RDAT); _il_release_nic_access(il); spin_unlock_irqrestore(&il->reg_lock, reg_flags); return value; } EXPORT_SYMBOL(il_read_targ_mem); void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val) { unsigned long reg_flags; spin_lock_irqsave(&il->reg_lock, reg_flags); if (likely(_il_grab_nic_access(il))) { _il_wr(il, HBUS_TARG_MEM_WADDR, addr); _il_wr(il, HBUS_TARG_MEM_WDAT, val); _il_release_nic_access(il); } spin_unlock_irqrestore(&il->reg_lock, reg_flags); } EXPORT_SYMBOL(il_write_targ_mem); const char * il_get_cmd_string(u8 cmd) { switch (cmd) { IL_CMD(N_ALIVE); IL_CMD(N_ERROR); IL_CMD(C_RXON); IL_CMD(C_RXON_ASSOC); IL_CMD(C_QOS_PARAM); IL_CMD(C_RXON_TIMING); IL_CMD(C_ADD_STA); IL_CMD(C_REM_STA); IL_CMD(C_WEPKEY); IL_CMD(N_3945_RX); IL_CMD(C_TX); IL_CMD(C_RATE_SCALE); IL_CMD(C_LEDS); IL_CMD(C_TX_LINK_QUALITY_CMD); IL_CMD(C_CHANNEL_SWITCH); IL_CMD(N_CHANNEL_SWITCH); IL_CMD(C_SPECTRUM_MEASUREMENT); IL_CMD(N_SPECTRUM_MEASUREMENT); IL_CMD(C_POWER_TBL); IL_CMD(N_PM_SLEEP); IL_CMD(N_PM_DEBUG_STATS); IL_CMD(C_SCAN); IL_CMD(C_SCAN_ABORT); IL_CMD(N_SCAN_START); IL_CMD(N_SCAN_RESULTS); IL_CMD(N_SCAN_COMPLETE); IL_CMD(N_BEACON); IL_CMD(C_TX_BEACON); IL_CMD(C_TX_PWR_TBL); IL_CMD(C_BT_CONFIG); IL_CMD(C_STATS); IL_CMD(N_STATS); IL_CMD(N_CARD_STATE); 
IL_CMD(N_MISSED_BEACONS); IL_CMD(C_CT_KILL_CONFIG); IL_CMD(C_SENSITIVITY); IL_CMD(C_PHY_CALIBRATION); IL_CMD(N_RX_PHY); IL_CMD(N_RX_MPDU); IL_CMD(N_RX); IL_CMD(N_COMPRESSED_BA); default: return "UNKNOWN"; } } EXPORT_SYMBOL(il_get_cmd_string); #define HOST_COMPLETE_TIMEOUT (HZ / 2) static void il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd, struct il_rx_pkt *pkt) { if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { IL_ERR("Bad return from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); return; } #ifdef CONFIG_IWLEGACY_DEBUG switch (cmd->hdr.cmd) { case C_TX_LINK_QUALITY_CMD: case C_SENSITIVITY: D_HC_DUMP("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); break; default: D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); } #endif } static int il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd) { int ret; BUG_ON(!(cmd->flags & CMD_ASYNC)); /* An asynchronous command can not expect an SKB to be set. */ BUG_ON(cmd->flags & CMD_WANT_SKB); /* Assign a generic callback if one is not provided */ if (!cmd->callback) cmd->callback = il_generic_cmd_callback; if (test_bit(S_EXIT_PENDING, &il->status)) return -EBUSY; ret = il_enqueue_hcmd(il, cmd); if (ret < 0) { IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n", il_get_cmd_string(cmd->id), ret); return ret; } return 0; } int il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd) { int cmd_idx; int ret; lockdep_assert_held(&il->mutex); BUG_ON(cmd->flags & CMD_ASYNC); /* A synchronous command can not have a callback set. */ BUG_ON(cmd->callback); D_INFO("Attempting to send sync command %s\n", il_get_cmd_string(cmd->id)); set_bit(S_HCMD_ACTIVE, &il->status); D_INFO("Setting HCMD_ACTIVE for command %s\n", il_get_cmd_string(cmd->id)); cmd_idx = il_enqueue_hcmd(il, cmd); if (cmd_idx < 0) { ret = cmd_idx; IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n", il_get_cmd_string(cmd->id), ret); goto out; } ret = wait_event_timeout(il->wait_command_queue, !test_bit(S_HCMD_ACTIVE, &il->status), HOST_COMPLETE_TIMEOUT); if (!ret) { if (test_bit(S_HCMD_ACTIVE, &il->status)) { IL_ERR("Error sending %s: time out after %dms.\n", il_get_cmd_string(cmd->id), jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); clear_bit(S_HCMD_ACTIVE, &il->status); D_INFO("Clearing HCMD_ACTIVE for command %s\n", il_get_cmd_string(cmd->id)); ret = -ETIMEDOUT; goto cancel; } } if (test_bit(S_RFKILL, &il->status)) { IL_ERR("Command %s aborted: RF KILL Switch\n", il_get_cmd_string(cmd->id)); ret = -ECANCELED; goto fail; } if (test_bit(S_FW_ERROR, &il->status)) { IL_ERR("Command %s failed: FW Error\n", il_get_cmd_string(cmd->id)); ret = -EIO; goto fail; } if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { IL_ERR("Error: Response NULL in '%s'\n", il_get_cmd_string(cmd->id)); ret = -EIO; goto cancel; } ret = 0; goto out; cancel: if (cmd->flags & CMD_WANT_SKB) { /* * Cancel the CMD_WANT_SKB flag for the cmd in the * TX cmd queue. Otherwise in case the cmd comes * in later, it will possibly set an invalid * address (cmd->meta.source). 
*/ il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB; } fail: if (cmd->reply_page) { il_free_pages(il, cmd->reply_page); cmd->reply_page = 0; } out: return ret; } EXPORT_SYMBOL(il_send_cmd_sync); int il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd) { if (cmd->flags & CMD_ASYNC) return il_send_cmd_async(il, cmd); return il_send_cmd_sync(il, cmd); } EXPORT_SYMBOL(il_send_cmd); int il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data) { struct il_host_cmd cmd = { .id = id, .len = len, .data = data, }; return il_send_cmd_sync(il, &cmd); } EXPORT_SYMBOL(il_send_cmd_pdu); int il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data, void (*callback) (struct il_priv *il, struct il_device_cmd *cmd, struct il_rx_pkt *pkt)) { struct il_host_cmd cmd = { .id = id, .len = len, .data = data, }; cmd.flags |= CMD_ASYNC; cmd.callback = callback; return il_send_cmd_async(il, &cmd); } EXPORT_SYMBOL(il_send_cmd_pdu_async); /* default: IL_LED_BLINK(0) using blinking idx table */ static int led_mode; module_param(led_mode, int, S_IRUGO); MODULE_PARM_DESC(led_mode, "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking"); /* Throughput OFF time(ms) ON time (ms) * >300 25 25 * >200 to 300 40 40 * >100 to 200 55 55 * >70 to 100 65 65 * >50 to 70 75 75 * >20 to 50 85 85 * >10 to 20 95 95 * >5 to 10 110 110 * >1 to 5 130 130 * >0 to 1 167 167 * <=0 SOLID ON */ static const struct ieee80211_tpt_blink il_blink[] = { {.throughput = 0, .blink_time = 334}, {.throughput = 1 * 1024 - 1, .blink_time = 260}, {.throughput = 5 * 1024 - 1, .blink_time = 220}, {.throughput = 10 * 1024 - 1, .blink_time = 190}, {.throughput = 20 * 1024 - 1, .blink_time = 170}, {.throughput = 50 * 1024 - 1, .blink_time = 150}, {.throughput = 70 * 1024 - 1, .blink_time = 130}, {.throughput = 100 * 1024 - 1, .blink_time = 110}, {.throughput = 200 * 1024 - 1, .blink_time = 80}, {.throughput = 300 * 1024 - 1, .blink_time = 50}, }; /* * Adjust led blink rate to compensate on a MAC Clock difference on every HW * Led blink rate analysis showed an average deviation of 0% on 3945, * 5% on 4965 HW. 
* Need to compensate on the led on/off time per HW according to the deviation * to achieve the desired led frequency * The calculation is: (100-averageDeviation)/100 * blinkTime * For code efficiency the calculation will be: * compensation = (100 - averageDeviation) * 64 / 100 * NewBlinkTime = (compensation * BlinkTime) / 64 */ static inline u8 il_blink_compensation(struct il_priv *il, u8 time, u16 compensation) { if (!compensation) { IL_ERR("undefined blink compensation: " "use pre-defined blinking time\n"); return time; } return (u8) ((time * compensation) >> 6); } /* Set led pattern command */ static int il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off) { struct il_led_cmd led_cmd = { .id = IL_LED_LINK, .interval = IL_DEF_LED_INTRVL }; int ret; if (!test_bit(S_READY, &il->status)) return -EBUSY; if (il->blink_on == on && il->blink_off == off) return 0; if (off == 0) { /* led is SOLID_ON */ on = IL_LED_SOLID; } D_LED("Led blink time compensation=%u\n", il->cfg->led_compensation); led_cmd.on = il_blink_compensation(il, on, il->cfg->led_compensation); led_cmd.off = il_blink_compensation(il, off, il->cfg->led_compensation); ret = il->ops->send_led_cmd(il, &led_cmd); if (!ret) { il->blink_on = on; il->blink_off = off; } return ret; } static void il_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct il_priv *il = container_of(led_cdev, struct il_priv, led); unsigned long on = 0; if (brightness > 0) on = IL_LED_SOLID; il_led_cmd(il, on, 0); } static int il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct il_priv *il = container_of(led_cdev, struct il_priv, led); return il_led_cmd(il, *delay_on, *delay_off); } void il_leds_init(struct il_priv *il) { int mode = led_mode; int ret; if (mode == IL_LED_DEFAULT) mode = il->cfg->led_mode; il->led.name = kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy)); il->led.brightness_set = il_led_brightness_set; il->led.blink_set = il_led_blink_set; il->led.max_brightness = 1; switch (mode) { case IL_LED_DEFAULT: WARN_ON(1); break; case IL_LED_BLINK: il->led.default_trigger = ieee80211_create_tpt_led_trigger(il->hw, IEEE80211_TPT_LEDTRIG_FL_CONNECTED, il_blink, ARRAY_SIZE(il_blink)); break; case IL_LED_RF_STATE: il->led.default_trigger = ieee80211_get_radio_led_name(il->hw); break; } ret = led_classdev_register(&il->pci_dev->dev, &il->led); if (ret) { kfree(il->led.name); return; } il->led_registered = true; } EXPORT_SYMBOL(il_leds_init); void il_leds_exit(struct il_priv *il) { if (!il->led_registered) return; led_classdev_unregister(&il->led); kfree(il->led.name); } EXPORT_SYMBOL(il_leds_exit); /************************** EEPROM BANDS **************************** * * The il_eeprom_band definitions below provide the mapping from the * EEPROM contents to the specific channel number supported for each * band. * * For example, il_priv->eeprom.band_3_channels[4] from the band_3 * definition below maps to physical channel 42 in the 5.2GHz spectrum. * The specific geography and calibration information for that channel * is contained in the eeprom map itself. * * During init, we copy the eeprom information and channel map * information into il->channel_info_24/52 and il->channel_map_24/52 * * channel_map_24/52 provides the idx in the channel_info array for a * given channel. 
We have to have two separate maps as there is channel * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and * band_2 * * A value of 0xff stored in the channel_map indicates that the channel * is not supported by the hardware at all. * * A value of 0xfe in the channel_map indicates that the channel is not * valid for Tx with the current hardware. This means that * while the system can tune and receive on a given channel, it may not * be able to associate or transmit any frames on that * channel. There is no corresponding channel information for that * entry. * *********************************************************************/ /* 2.4 GHz */ const u8 il_eeprom_band_1[14] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }; /* 5.2 GHz bands */ static const u8 il_eeprom_band_2[] = { /* 4915-5080MHz */ 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 }; static const u8 il_eeprom_band_3[] = { /* 5170-5320MHz */ 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 }; static const u8 il_eeprom_band_4[] = { /* 5500-5700MHz */ 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 }; static const u8 il_eeprom_band_5[] = { /* 5725-5825MHz */ 145, 149, 153, 157, 161, 165 }; static const u8 il_eeprom_band_6[] = { /* 2.4 ht40 channel */ 1, 2, 3, 4, 5, 6, 7 }; static const u8 il_eeprom_band_7[] = { /* 5.2 ht40 channel */ 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 }; /****************************************************************************** * * EEPROM related functions * ******************************************************************************/ static int il_eeprom_verify_signature(struct il_priv *il) { u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; int ret = 0; D_EEPROM("EEPROM signature=0x%08x\n", gp); switch (gp) { case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: break; default: IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp); ret = -ENOENT; break; } return ret; } const u8 * il_eeprom_query_addr(const struct il_priv *il, size_t offset) { BUG_ON(offset >= il->cfg->eeprom_size); return &il->eeprom[offset]; } EXPORT_SYMBOL(il_eeprom_query_addr); u16 il_eeprom_query16(const struct il_priv *il, size_t offset) { if (!il->eeprom) return 0; return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8); } EXPORT_SYMBOL(il_eeprom_query16); /** * il_eeprom_init - read EEPROM contents * * Load the EEPROM contents from adapter into il->eeprom * * NOTE: This routine uses the non-debug IO access functions. 
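* Each 16-bit word is fetched by writing the byte offset (shifted left by
* one) to CSR_EEPROM_REG, polling for CSR_EEPROM_REG_READ_VALID_MSK and
* then taking the upper 16 bits of the register value; see the read loop
* below.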
*/ int il_eeprom_init(struct il_priv *il) { __le16 *e; u32 gp = _il_rd(il, CSR_EEPROM_GP); int sz; int ret; u16 addr; /* allocate eeprom */ sz = il->cfg->eeprom_size; D_EEPROM("NVM size = %d\n", sz); il->eeprom = kzalloc(sz, GFP_KERNEL); if (!il->eeprom) { ret = -ENOMEM; goto alloc_err; } e = (__le16 *) il->eeprom; il->ops->apm_init(il); ret = il_eeprom_verify_signature(il); if (ret < 0) { IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp); ret = -ENOENT; goto err; } /* Make sure driver (instead of uCode) is allowed to read EEPROM */ ret = il->ops->eeprom_acquire_semaphore(il); if (ret < 0) { IL_ERR("Failed to acquire EEPROM semaphore.\n"); ret = -ENOENT; goto err; } /* eeprom is an array of 16bit values */ for (addr = 0; addr < sz; addr += sizeof(u16)) { u32 r; _il_wr(il, CSR_EEPROM_REG, CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); ret = _il_poll_bit(il, CSR_EEPROM_REG, CSR_EEPROM_REG_READ_VALID_MSK, CSR_EEPROM_REG_READ_VALID_MSK, IL_EEPROM_ACCESS_TIMEOUT); if (ret < 0) { IL_ERR("Time out reading EEPROM[%d]\n", addr); goto done; } r = _il_rd(il, CSR_EEPROM_REG); e[addr / 2] = cpu_to_le16(r >> 16); } D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM", il_eeprom_query16(il, EEPROM_VERSION)); ret = 0; done: il->ops->eeprom_release_semaphore(il); err: if (ret) il_eeprom_free(il); /* Reset chip to save power until we load uCode during "up". */ il_apm_stop(il); alloc_err: return ret; } EXPORT_SYMBOL(il_eeprom_init); void il_eeprom_free(struct il_priv *il) { kfree(il->eeprom); il->eeprom = NULL; } EXPORT_SYMBOL(il_eeprom_free); static void il_init_band_reference(const struct il_priv *il, int eep_band, int *eeprom_ch_count, const struct il_eeprom_channel **eeprom_ch_info, const u8 **eeprom_ch_idx) { u32 offset = il->cfg->regulatory_bands[eep_band - 1]; switch (eep_band) { case 1: /* 2.4GHz band */ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1); *eeprom_ch_info = (struct il_eeprom_channel *)il_eeprom_query_addr(il, offset); *eeprom_ch_idx = il_eeprom_band_1; break; case 2: /* 4.9GHz band */ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2); *eeprom_ch_info = (struct il_eeprom_channel *)il_eeprom_query_addr(il, offset); *eeprom_ch_idx = il_eeprom_band_2; break; case 3: /* 5.2GHz band */ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3); *eeprom_ch_info = (struct il_eeprom_channel *)il_eeprom_query_addr(il, offset); *eeprom_ch_idx = il_eeprom_band_3; break; case 4: /* 5.5GHz band */ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4); *eeprom_ch_info = (struct il_eeprom_channel *)il_eeprom_query_addr(il, offset); *eeprom_ch_idx = il_eeprom_band_4; break; case 5: /* 5.7GHz band */ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5); *eeprom_ch_info = (struct il_eeprom_channel *)il_eeprom_query_addr(il, offset); *eeprom_ch_idx = il_eeprom_band_5; break; case 6: /* 2.4GHz ht40 channels */ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6); *eeprom_ch_info = (struct il_eeprom_channel *)il_eeprom_query_addr(il, offset); *eeprom_ch_idx = il_eeprom_band_6; break; case 7: /* 5 GHz ht40 channels */ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7); *eeprom_ch_info = (struct il_eeprom_channel *)il_eeprom_query_addr(il, offset); *eeprom_ch_idx = il_eeprom_band_7; break; default: BUG(); } } #define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \ ? # x " " : "") /** * il_mod_ht40_chan_info - Copy ht40 channel info into driver's il. * * Does not set up a command, or touch hardware. 
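* When the EEPROM marks the channel as valid, the HT40 extension
* restriction passed in (IEEE80211_CHAN_NO_HT40PLUS or
* IEEE80211_CHAN_NO_HT40MINUS) is cleared from the channel's
* ht40_extension_channel mask; il_init_channel_map() calls this once for
* the lower and once for the upper half of each HT40 pair.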
*/ static int il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel, const struct il_eeprom_channel *eeprom_ch, u8 clear_ht40_extension_channel) { struct il_channel_info *ch_info; ch_info = (struct il_channel_info *)il_get_channel_info(il, band, channel); if (!il_is_channel_valid(ch_info)) return -1; D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):" " Ad-Hoc %ssupported\n", ch_info->channel, il_is_channel_a_band(ch_info) ? "5.2" : "2.4", CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE), CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE), CHECK_AND_PRINT(DFS), eeprom_ch->flags, eeprom_ch->max_power_avg, ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not "); ch_info->ht40_eeprom = *eeprom_ch; ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg; ch_info->ht40_flags = eeprom_ch->flags; if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel; return 0; } #define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \ ? # x " " : "") /** * il_init_channel_map - Set up driver's info for all possible channels */ int il_init_channel_map(struct il_priv *il) { int eeprom_ch_count = 0; const u8 *eeprom_ch_idx = NULL; const struct il_eeprom_channel *eeprom_ch_info = NULL; int band, ch; struct il_channel_info *ch_info; if (il->channel_count) { D_EEPROM("Channel map already initialized.\n"); return 0; } D_EEPROM("Initializing regulatory info from EEPROM\n"); il->channel_count = ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) + ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) + ARRAY_SIZE(il_eeprom_band_5); D_EEPROM("Parsing data for %d channels.\n", il->channel_count); il->channel_info = kzalloc(sizeof(struct il_channel_info) * il->channel_count, GFP_KERNEL); if (!il->channel_info) { IL_ERR("Could not allocate channel_info\n"); il->channel_count = 0; return -ENOMEM; } ch_info = il->channel_info; /* Loop through the 5 EEPROM bands adding them in order to the * channel map we maintain (that contains additional information than * what just in the EEPROM) */ for (band = 1; band <= 5; band++) { il_init_band_reference(il, band, &eeprom_ch_count, &eeprom_ch_info, &eeprom_ch_idx); /* Loop through each band adding each of the channels */ for (ch = 0; ch < eeprom_ch_count; ch++) { ch_info->channel = eeprom_ch_idx[ch]; ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; /* permanently store EEPROM's channel regulatory flags * and max power in channel info database. */ ch_info->eeprom = eeprom_ch_info[ch]; /* Copy the run-time flags so they are there even on * invalid channels */ ch_info->flags = eeprom_ch_info[ch].flags; /* First write that ht40 is not enabled, and then enable * one by one */ ch_info->ht40_extension_channel = IEEE80211_CHAN_NO_HT40; if (!(il_is_channel_valid(ch_info))) { D_EEPROM("Ch. %d Flags %x [%sGHz] - " "No traffic\n", ch_info->channel, ch_info->flags, il_is_channel_a_band(ch_info) ? "5.2" : "2.4"); ch_info++; continue; } /* Initialize regulatory-based run-time data */ ch_info->max_power_avg = ch_info->curr_txpow = eeprom_ch_info[ch].max_power_avg; ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; ch_info->min_power = 0; D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):" " Ad-Hoc %ssupported\n", ch_info->channel, il_is_channel_a_band(ch_info) ? 
"5.2" : "2.4", CHECK_AND_PRINT_I(VALID), CHECK_AND_PRINT_I(IBSS), CHECK_AND_PRINT_I(ACTIVE), CHECK_AND_PRINT_I(RADAR), CHECK_AND_PRINT_I(WIDE), CHECK_AND_PRINT_I(DFS), eeprom_ch_info[ch].flags, eeprom_ch_info[ch].max_power_avg, ((eeprom_ch_info[ch]. flags & EEPROM_CHANNEL_IBSS) && !(eeprom_ch_info[ch]. flags & EEPROM_CHANNEL_RADAR)) ? "" : "not "); ch_info++; } } /* Check if we do have HT40 channels */ if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 && il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40) return 0; /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ for (band = 6; band <= 7; band++) { enum ieee80211_band ieeeband; il_init_band_reference(il, band, &eeprom_ch_count, &eeprom_ch_info, &eeprom_ch_idx); /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ ieeeband = (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; /* Loop through each band adding each of the channels */ for (ch = 0; ch < eeprom_ch_count; ch++) { /* Set up driver's info for lower half */ il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch], &eeprom_ch_info[ch], IEEE80211_CHAN_NO_HT40PLUS); /* Set up driver's info for upper half */ il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch] + 4, &eeprom_ch_info[ch], IEEE80211_CHAN_NO_HT40MINUS); } } return 0; } EXPORT_SYMBOL(il_init_channel_map); /* * il_free_channel_map - undo allocations in il_init_channel_map */ void il_free_channel_map(struct il_priv *il) { kfree(il->channel_info); il->channel_count = 0; } EXPORT_SYMBOL(il_free_channel_map); /** * il_get_channel_info - Find driver's ilate channel info * * Based on band and channel number. */ const struct il_channel_info * il_get_channel_info(const struct il_priv *il, enum ieee80211_band band, u16 channel) { int i; switch (band) { case IEEE80211_BAND_5GHZ: for (i = 14; i < il->channel_count; i++) { if (il->channel_info[i].channel == channel) return &il->channel_info[i]; } break; case IEEE80211_BAND_2GHZ: if (channel >= 1 && channel <= 14) return &il->channel_info[channel - 1]; break; default: BUG(); } return NULL; } EXPORT_SYMBOL(il_get_channel_info); /* * Setting power level allows the card to go to sleep when not busy. * * We calculate a sleep command based on the required latency, which * we get from mac80211. In order to handle thermal throttling, we can * also use pre-defined power levels. */ /* * This defines the old power levels. 
They are still used by default * (level 1) and for thermal throttle (levels 3 through 5) */ struct il_power_vec_entry { struct il_powertable_cmd cmd; u8 no_dtim; /* number of skip dtim */ }; static void il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd) { memset(cmd, 0, sizeof(*cmd)); if (il->power_data.pci_pm) cmd->flags |= IL_POWER_PCI_PM_MSK; D_POWER("Sleep command for CAM\n"); } static int il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd) { D_POWER("Sending power/sleep command\n"); D_POWER("Flags value = 0x%08X\n", cmd->flags); D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout)); D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout)); D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n", le32_to_cpu(cmd->sleep_interval[0]), le32_to_cpu(cmd->sleep_interval[1]), le32_to_cpu(cmd->sleep_interval[2]), le32_to_cpu(cmd->sleep_interval[3]), le32_to_cpu(cmd->sleep_interval[4])); return il_send_cmd_pdu(il, C_POWER_TBL, sizeof(struct il_powertable_cmd), cmd); } int il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force) { int ret; bool update_chains; lockdep_assert_held(&il->mutex); /* Don't update the RX chain when chain noise calibration is running */ update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE || il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE; if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force) return 0; if (!il_is_ready_rf(il)) return -EIO; /* scan complete use sleep_power_next, need to be updated */ memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd)); if (test_bit(S_SCANNING, &il->status) && !force) { D_INFO("Defer power set mode while scanning\n"); return 0; } if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK) set_bit(S_POWER_PMI, &il->status); ret = il_set_power(il, cmd); if (!ret) { if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)) clear_bit(S_POWER_PMI, &il->status); if (il->ops->update_chain_flags && update_chains) il->ops->update_chain_flags(il); else if (il->ops->update_chain_flags) D_POWER("Cannot update the power, chain noise " "calibration running: %d\n", il->chain_noise_data.state); memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)); } else IL_ERR("set power fail, ret = %d", ret); return ret; } int il_power_update_mode(struct il_priv *il, bool force) { struct il_powertable_cmd cmd; il_power_sleep_cam_cmd(il, &cmd); return il_power_set_mode(il, &cmd, force); } EXPORT_SYMBOL(il_power_update_mode); /* initialize to default */ void il_power_initialize(struct il_priv *il) { u16 lctl = il_pcie_link_ctl(il); il->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN); il->power_data.debug_sleep_level_override = -1; memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd)); } EXPORT_SYMBOL(il_power_initialize); /* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after * sending probe req. This should be set long enough to hear probe responses * from more than one AP. */ #define IL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */ #define IL_ACTIVE_DWELL_TIME_52 (20) #define IL_ACTIVE_DWELL_FACTOR_24GHZ (3) #define IL_ACTIVE_DWELL_FACTOR_52GHZ (2) /* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. * Must be set longer than active dwell time. * For the most reliable scan, set > AP beacon interval (typically 100msec). 
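* As a rough example with the values below: on 2.4 GHz the passive dwell
* starts at 100 + 20 = 120 ms; when associated with a beacon interval of
* 100, il_get_passive_dwell_time() clamps it to (100 * 98) / 100 - 2 * 5 =
* 88 ms.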
*/ #define IL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ #define IL_PASSIVE_DWELL_TIME_52 (10) #define IL_PASSIVE_DWELL_BASE (100) #define IL_CHANNEL_TUNE_TIME 5 static int il_send_scan_abort(struct il_priv *il) { int ret; struct il_rx_pkt *pkt; struct il_host_cmd cmd = { .id = C_SCAN_ABORT, .flags = CMD_WANT_SKB, }; /* Exit instantly with error when device is not ready * to receive scan abort command or it does not perform * hardware scan currently */ if (!test_bit(S_READY, &il->status) || !test_bit(S_GEO_CONFIGURED, &il->status) || !test_bit(S_SCAN_HW, &il->status) || test_bit(S_FW_ERROR, &il->status) || test_bit(S_EXIT_PENDING, &il->status)) return -EIO; ret = il_send_cmd_sync(il, &cmd); if (ret) return ret; pkt = (struct il_rx_pkt *)cmd.reply_page; if (pkt->u.status != CAN_ABORT_STATUS) { /* The scan abort will return 1 for success or * 2 for "failure". A failure condition can be * due to simply not being in an active scan which * can occur if we send the scan abort before the * microcode has notified us that a scan is * completed. */ D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status); ret = -EIO; } il_free_pages(il, cmd.reply_page); return ret; } static void il_complete_scan(struct il_priv *il, bool aborted) { /* check if scan was requested from mac80211 */ if (il->scan_request) { D_SCAN("Complete scan in mac80211\n"); ieee80211_scan_completed(il->hw, aborted); } il->scan_vif = NULL; il->scan_request = NULL; } void il_force_scan_end(struct il_priv *il) { lockdep_assert_held(&il->mutex); if (!test_bit(S_SCANNING, &il->status)) { D_SCAN("Forcing scan end while not scanning\n"); return; } D_SCAN("Forcing scan end\n"); clear_bit(S_SCANNING, &il->status); clear_bit(S_SCAN_HW, &il->status); clear_bit(S_SCAN_ABORTING, &il->status); il_complete_scan(il, true); } static void il_do_scan_abort(struct il_priv *il) { int ret; lockdep_assert_held(&il->mutex); if (!test_bit(S_SCANNING, &il->status)) { D_SCAN("Not performing scan to abort\n"); return; } if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) { D_SCAN("Scan abort in progress\n"); return; } ret = il_send_scan_abort(il); if (ret) { D_SCAN("Send scan abort failed %d\n", ret); il_force_scan_end(il); } else D_SCAN("Successfully sent scan abort\n"); } /** * il_scan_cancel - Cancel any currently executing HW scan */ int il_scan_cancel(struct il_priv *il) { D_SCAN("Queuing abort scan\n"); queue_work(il->workqueue, &il->abort_scan); return 0; } EXPORT_SYMBOL(il_scan_cancel); /** * il_scan_cancel_timeout - Cancel any currently executing HW scan * @ms: amount of time to wait (in milliseconds) for scan to abort * */ int il_scan_cancel_timeout(struct il_priv *il, unsigned long ms) { unsigned long timeout = jiffies + msecs_to_jiffies(ms); lockdep_assert_held(&il->mutex); D_SCAN("Scan cancel timeout\n"); il_do_scan_abort(il); while (time_before_eq(jiffies, timeout)) { if (!test_bit(S_SCAN_HW, &il->status)) break; msleep(20); } return test_bit(S_SCAN_HW, &il->status); } EXPORT_SYMBOL(il_scan_cancel_timeout); /* Service response to C_SCAN (0x80) */ static void il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb) { #ifdef CONFIG_IWLEGACY_DEBUG struct il_rx_pkt *pkt = rxb_addr(rxb); struct il_scanreq_notification *notif = (struct il_scanreq_notification *)pkt->u.raw; D_SCAN("Scan request status = 0x%x\n", notif->status); #endif } /* Service N_SCAN_START (0x82) */ static void il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb) { struct il_rx_pkt *pkt = rxb_addr(rxb); struct il_scanstart_notification *notif = (struct il_scanstart_notification
*)pkt->u.raw; il->scan_start_tsf = le32_to_cpu(notif->tsf_low); D_SCAN("Scan start: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel, notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer); } /* Service N_SCAN_RESULTS (0x83) */ static void il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb) { #ifdef CONFIG_IWLEGACY_DEBUG struct il_rx_pkt *pkt = rxb_addr(rxb); struct il_scanresults_notification *notif = (struct il_scanresults_notification *)pkt->u.raw; D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d " "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low), le32_to_cpu(notif->stats[0]), le32_to_cpu(notif->tsf_low) - il->scan_start_tsf); #endif } /* Service N_SCAN_COMPLETE (0x84) */ static void il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb) { #ifdef CONFIG_IWLEGACY_DEBUG struct il_rx_pkt *pkt = rxb_addr(rxb); struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw; #endif D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", scan_notif->scanned_channels, scan_notif->tsf_low, scan_notif->tsf_high, scan_notif->status); /* The HW is no longer scanning */ clear_bit(S_SCAN_HW, &il->status); D_SCAN("Scan on %sGHz took %dms\n", (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2", jiffies_to_msecs(jiffies - il->scan_start)); queue_work(il->workqueue, &il->scan_completed); } void il_setup_rx_scan_handlers(struct il_priv *il) { /* scan handlers */ il->handlers[C_SCAN] = il_hdl_scan; il->handlers[N_SCAN_START] = il_hdl_scan_start; il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results; il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete; } EXPORT_SYMBOL(il_setup_rx_scan_handlers); inline u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band, u8 n_probes) { if (band == IEEE80211_BAND_5GHZ) return IL_ACTIVE_DWELL_TIME_52 + IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); else return IL_ACTIVE_DWELL_TIME_24 + IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); } EXPORT_SYMBOL(il_get_active_dwell_time); u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band, struct ieee80211_vif *vif) { u16 value; u16 passive = (band == IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE + IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE + IL_PASSIVE_DWELL_TIME_52; if (il_is_any_associated(il)) { /* * If we're associated, we clamp the maximum passive * dwell time to be 98% of the smallest beacon interval * (minus 2 * channel tune time) */ value = il->vif ? 
il->vif->bss_conf.beacon_int : 0; if (value > IL_PASSIVE_DWELL_BASE || !value) value = IL_PASSIVE_DWELL_BASE; value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2; passive = min(value, passive); } return passive; } EXPORT_SYMBOL(il_get_passive_dwell_time); void il_init_scan_params(struct il_priv *il) { u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1; if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ]) il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ]) il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; } EXPORT_SYMBOL(il_init_scan_params); static int il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif) { int ret; lockdep_assert_held(&il->mutex); cancel_delayed_work(&il->scan_check); if (!il_is_ready_rf(il)) { IL_WARN("Request scan called when driver not ready.\n"); return -EIO; } if (test_bit(S_SCAN_HW, &il->status)) { D_SCAN("Multiple concurrent scan requests in parallel.\n"); return -EBUSY; } if (test_bit(S_SCAN_ABORTING, &il->status)) { D_SCAN("Scan request while abort pending.\n"); return -EBUSY; } D_SCAN("Starting scan...\n"); set_bit(S_SCANNING, &il->status); il->scan_start = jiffies; ret = il->ops->request_scan(il, vif); if (ret) { clear_bit(S_SCANNING, &il->status); return ret; } queue_delayed_work(il->workqueue, &il->scan_check, IL_SCAN_CHECK_WATCHDOG); return 0; } int il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_scan_request *req) { struct il_priv *il = hw->priv; int ret; if (req->n_channels == 0) { IL_ERR("Can not scan on no channels.\n"); return -EINVAL; } mutex_lock(&il->mutex); D_MAC80211("enter\n"); if (test_bit(S_SCANNING, &il->status)) { D_SCAN("Scan already in progress.\n"); ret = -EAGAIN; goto out_unlock; } /* mac80211 will only ask for one band at a time */ il->scan_request = req; il->scan_vif = vif; il->scan_band = req->channels[0]->band; ret = il_scan_initiate(il, vif); out_unlock: D_MAC80211("leave ret %d\n", ret); mutex_unlock(&il->mutex); return ret; } EXPORT_SYMBOL(il_mac_hw_scan); static void il_bg_scan_check(struct work_struct *data) { struct il_priv *il = container_of(data, struct il_priv, scan_check.work); D_SCAN("Scan check work\n"); /* Since we are here firmware does not finish scan and * most likely is in bad shape, so we don't bother to * send abort command, just force scan complete to mac80211 */ mutex_lock(&il->mutex); il_force_scan_end(il); mutex_unlock(&il->mutex); } /** * il_fill_probe_req - fill in all required fields and IE for probe request */ u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame, const u8 *ta, const u8 *ies, int ie_len, int left) { int len = 0; u8 *pos = NULL; /* Make sure there is enough space for the probe request, * two mandatory IEs and the data */ left -= 24; if (left < 0) return 0; frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); memcpy(frame->da, il_bcast_addr, ETH_ALEN); memcpy(frame->sa, ta, ETH_ALEN); memcpy(frame->bssid, il_bcast_addr, ETH_ALEN); frame->seq_ctrl = 0; len += 24; /* ...next IE... 
*/ pos = &frame->u.probe_req.variable[0]; /* fill in our indirect SSID IE */ left -= 2; if (left < 0) return 0; *pos++ = WLAN_EID_SSID; *pos++ = 0; len += 2; if (WARN_ON(left < ie_len)) return len; if (ies && ie_len) { memcpy(pos, ies, ie_len); len += ie_len; } return (u16) len; } EXPORT_SYMBOL(il_fill_probe_req); static void il_bg_abort_scan(struct work_struct *work) { struct il_priv *il = container_of(work, struct il_priv, abort_scan); D_SCAN("Abort scan work\n"); /* We keep scan_check work queued in case when firmware will not * report back scan completed notification */ mutex_lock(&il->mutex); il_scan_cancel_timeout(il, 200); mutex_unlock(&il->mutex); } static void il_bg_scan_completed(struct work_struct *work) { struct il_priv *il = container_of(work, struct il_priv, scan_completed); bool aborted; D_SCAN("Completed scan.\n"); cancel_delayed_work(&il->scan_check); mutex_lock(&il->mutex); aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status); if (aborted) D_SCAN("Aborted scan completed.\n"); if (!test_and_clear_bit(S_SCANNING, &il->status)) { D_SCAN("Scan already completed.\n"); goto out_settings; } il_complete_scan(il, aborted); out_settings: /* Can we still talk to firmware ? */ if (!il_is_ready_rf(il)) goto out; /* * We do not commit power settings while scan is pending, * do it now if the settings changed. */ il_power_set_mode(il, &il->power_data.sleep_cmd_next, false); il_set_tx_power(il, il->tx_power_next, false); il->ops->post_scan(il); out: mutex_unlock(&il->mutex); } void il_setup_scan_deferred_work(struct il_priv *il) { INIT_WORK(&il->scan_completed, il_bg_scan_completed); INIT_WORK(&il->abort_scan, il_bg_abort_scan); INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check); } EXPORT_SYMBOL(il_setup_scan_deferred_work); void il_cancel_scan_deferred_work(struct il_priv *il) { cancel_work_sync(&il->abort_scan); cancel_work_sync(&il->scan_completed); if (cancel_delayed_work_sync(&il->scan_check)) { mutex_lock(&il->mutex); il_force_scan_end(il); mutex_unlock(&il->mutex); } } EXPORT_SYMBOL(il_cancel_scan_deferred_work); /* il->sta_lock must be held */ static void il_sta_ucode_activate(struct il_priv *il, u8 sta_id) { if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n", sta_id, il->stations[sta_id].sta.sta.addr); if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) { D_ASSOC("STA id %u addr %pM already present" " in uCode (according to driver)\n", sta_id, il->stations[sta_id].sta.sta.addr); } else { il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE; D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id, il->stations[sta_id].sta.sta.addr); } } static int il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta, struct il_rx_pkt *pkt, bool sync) { u8 sta_id = addsta->sta.sta_id; unsigned long flags; int ret = -EIO; if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags); return ret; } D_INFO("Processing response for adding station %u\n", sta_id); spin_lock_irqsave(&il->sta_lock, flags); switch (pkt->u.add_sta.status) { case ADD_STA_SUCCESS_MSK: D_INFO("C_ADD_STA PASSED\n"); il_sta_ucode_activate(il, sta_id); ret = 0; break; case ADD_STA_NO_ROOM_IN_TBL: IL_ERR("Adding station %d failed, no room in table.\n", sta_id); break; case ADD_STA_NO_BLOCK_ACK_RESOURCE: IL_ERR("Adding station %d failed, no block ack resource.\n", sta_id); break; case ADD_STA_MODIFY_NON_EXIST_STA: IL_ERR("Attempting to modify non-existing station %d\n", sta_id); break; default: 
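/* Any other status is only logged; ret keeps its initial -EIO, so the
 * caller sees the addition as failed. */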
D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status); break; } D_INFO("%s station id %u addr %pM\n", il->stations[sta_id].sta.mode == STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id, il->stations[sta_id].sta.sta.addr); /* * XXX: The MAC address in the command buffer is often changed from * the original sent to the device. That is, the MAC address * written to the command buffer often is not the same MAC address * read from the command buffer when the command returns. This * issue has not yet been resolved and this debugging is left to * observe the problem. */ D_INFO("%s station according to cmd buffer %pM\n", il->stations[sta_id].sta.mode == STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr); spin_unlock_irqrestore(&il->sta_lock, flags); return ret; } static void il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd, struct il_rx_pkt *pkt) { struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload; il_process_add_sta_resp(il, addsta, pkt, false); } int il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags) { struct il_rx_pkt *pkt = NULL; int ret = 0; u8 data[sizeof(*sta)]; struct il_host_cmd cmd = { .id = C_ADD_STA, .flags = flags, .data = data, }; u8 sta_id __maybe_unused = sta->sta.sta_id; D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); if (flags & CMD_ASYNC) cmd.callback = il_add_sta_callback; else { cmd.flags |= CMD_WANT_SKB; might_sleep(); } cmd.len = il->ops->build_addsta_hcmd(sta, data); ret = il_send_cmd(il, &cmd); if (ret || (flags & CMD_ASYNC)) return ret; if (ret == 0) { pkt = (struct il_rx_pkt *)cmd.reply_page; ret = il_process_add_sta_resp(il, sta, pkt, true); } il_free_pages(il, cmd.reply_page); return ret; } EXPORT_SYMBOL(il_send_add_sta); static void il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta) { struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; __le32 sta_flags; u8 mimo_ps_mode; if (!sta || !sta_ht_inf->ht_supported) goto done; mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; D_ASSOC("spatial multiplexing power save mode: %s\n", (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" : (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? 
"dynamic" : "disabled"); sta_flags = il->stations[idx].sta.station_flags; sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); switch (mimo_ps_mode) { case WLAN_HT_CAP_SM_PS_STATIC: sta_flags |= STA_FLG_MIMO_DIS_MSK; break; case WLAN_HT_CAP_SM_PS_DYNAMIC: sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; break; case WLAN_HT_CAP_SM_PS_DISABLED: break; default: IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode); break; } sta_flags |= cpu_to_le32((u32) sta_ht_inf-> ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); sta_flags |= cpu_to_le32((u32) sta_ht_inf-> ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); if (il_is_ht40_tx_allowed(il, &sta->ht_cap)) sta_flags |= STA_FLG_HT40_EN_MSK; else sta_flags &= ~STA_FLG_HT40_EN_MSK; il->stations[idx].sta.station_flags = sta_flags; done: return; } /** * il_prep_station - Prepare station information for addition * * should be called with sta_lock held */ u8 il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap, struct ieee80211_sta *sta) { struct il_station_entry *station; int i; u8 sta_id = IL_INVALID_STATION; u16 rate; if (is_ap) sta_id = IL_AP_ID; else if (is_broadcast_ether_addr(addr)) sta_id = il->hw_params.bcast_id; else for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) { if (ether_addr_equal(il->stations[i].sta.sta.addr, addr)) { sta_id = i; break; } if (!il->stations[i].used && sta_id == IL_INVALID_STATION) sta_id = i; } /* * These two conditions have the same outcome, but keep them * separate */ if (unlikely(sta_id == IL_INVALID_STATION)) return sta_id; /* * uCode is not able to deal with multiple requests to add a * station. Keep track if one is in progress so that we do not send * another. */ if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) { D_INFO("STA %d already in process of being added.\n", sta_id); return sta_id; } if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) && ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) { D_ASSOC("STA %d (%pM) already added, not adding again.\n", sta_id, addr); return sta_id; } station = &il->stations[sta_id]; station->used = IL_STA_DRIVER_ACTIVE; D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr); il->num_stations++; /* Set up the C_ADD_STA command to send to device */ memset(&station->sta, 0, sizeof(struct il_addsta_cmd)); memcpy(station->sta.sta.addr, addr, ETH_ALEN); station->sta.mode = 0; station->sta.sta.sta_id = sta_id; station->sta.station_flags = 0; /* * OK to call unconditionally, since local stations (IBSS BSSID * STA and broadcast STA) pass in a NULL sta, and mac80211 * doesn't allow HT IBSS. */ il_set_ht_add_station(il, sta_id, sta); /* 3945 only */ rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP; /* Turn on both antennas for the station... */ station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); return sta_id; } EXPORT_SYMBOL_GPL(il_prep_station); #define STA_WAIT_TIMEOUT (HZ/2) /** * il_add_station_common - */ int il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap, struct ieee80211_sta *sta, u8 *sta_id_r) { unsigned long flags_spin; int ret = 0; u8 sta_id; struct il_addsta_cmd sta_cmd; *sta_id_r = 0; spin_lock_irqsave(&il->sta_lock, flags_spin); sta_id = il_prep_station(il, addr, is_ap, sta); if (sta_id == IL_INVALID_STATION) { IL_ERR("Unable to prepare station %pM for addition\n", addr); spin_unlock_irqrestore(&il->sta_lock, flags_spin); return -EINVAL; } /* * uCode is not able to deal with multiple requests to add a * station. 
Keep track if one is in progress so that we do not send * another. */ if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) { D_INFO("STA %d already in process of being added.\n", sta_id); spin_unlock_irqrestore(&il->sta_lock, flags_spin); return -EEXIST; } if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { D_ASSOC("STA %d (%pM) already added, not adding again.\n", sta_id, addr); spin_unlock_irqrestore(&il->sta_lock, flags_spin); return -EEXIST; } il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS; memcpy(&sta_cmd, &il->stations[sta_id].sta, sizeof(struct il_addsta_cmd)); spin_unlock_irqrestore(&il->sta_lock, flags_spin); /* Add station to device's station table */ ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC); if (ret) { spin_lock_irqsave(&il->sta_lock, flags_spin); IL_ERR("Adding station %pM failed.\n", il->stations[sta_id].sta.sta.addr); il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS; spin_unlock_irqrestore(&il->sta_lock, flags_spin); } *sta_id_r = sta_id; return ret; } EXPORT_SYMBOL(il_add_station_common); /** * il_sta_ucode_deactivate - deactivate ucode status for a station * * il->sta_lock must be held */ static void il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id) { /* Ucode must be active and driver must be non active */ if ((il->stations[sta_id]. used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) != IL_STA_UCODE_ACTIVE) IL_ERR("removed non active STA %u\n", sta_id); il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE; memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry)); D_ASSOC("Removed STA %u\n", sta_id); } static int il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id, bool temporary) { struct il_rx_pkt *pkt; int ret; unsigned long flags_spin; struct il_rem_sta_cmd rm_sta_cmd; struct il_host_cmd cmd = { .id = C_REM_STA, .len = sizeof(struct il_rem_sta_cmd), .flags = CMD_SYNC, .data = &rm_sta_cmd, }; memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); rm_sta_cmd.num_sta = 1; memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN); cmd.flags |= CMD_WANT_SKB; ret = il_send_cmd(il, &cmd); if (ret) return ret; pkt = (struct il_rx_pkt *)cmd.reply_page; if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags); ret = -EIO; } if (!ret) { switch (pkt->u.rem_sta.status) { case REM_STA_SUCCESS_MSK: if (!temporary) { spin_lock_irqsave(&il->sta_lock, flags_spin); il_sta_ucode_deactivate(il, sta_id); spin_unlock_irqrestore(&il->sta_lock, flags_spin); } D_ASSOC("C_REM_STA PASSED\n"); break; default: ret = -EIO; IL_ERR("C_REM_STA failed\n"); break; } } il_free_pages(il, cmd.reply_page); return ret; } /** * il_remove_station - Remove driver's knowledge of station. */ int il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr) { unsigned long flags; if (!il_is_ready(il)) { D_INFO("Unable to remove station %pM, device not ready.\n", addr); /* * It is typical for stations to be removed when we are * going down. 
Return success since device will be down * soon anyway */ return 0; } D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr); if (WARN_ON(sta_id == IL_INVALID_STATION)) return -EINVAL; spin_lock_irqsave(&il->sta_lock, flags); if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) { D_INFO("Removing %pM but non DRIVER active\n", addr); goto out_err; } if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { D_INFO("Removing %pM but non UCODE active\n", addr); goto out_err; } if (il->stations[sta_id].used & IL_STA_LOCAL) { kfree(il->stations[sta_id].lq); il->stations[sta_id].lq = NULL; } il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; il->num_stations--; BUG_ON(il->num_stations < 0); spin_unlock_irqrestore(&il->sta_lock, flags); return il_send_remove_station(il, addr, sta_id, false); out_err: spin_unlock_irqrestore(&il->sta_lock, flags); return -EINVAL; } EXPORT_SYMBOL_GPL(il_remove_station); /** * il_clear_ucode_stations - clear ucode station table bits * * This function clears all the bits in the driver indicating * which stations are active in the ucode. Call when something * other than explicit station management would cause this in * the ucode, e.g. unassociated RXON. */ void il_clear_ucode_stations(struct il_priv *il) { int i; unsigned long flags_spin; bool cleared = false; D_INFO("Clearing ucode stations in driver\n"); spin_lock_irqsave(&il->sta_lock, flags_spin); for (i = 0; i < il->hw_params.max_stations; i++) { if (il->stations[i].used & IL_STA_UCODE_ACTIVE) { D_INFO("Clearing ucode active for station %d\n", i); il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; cleared = true; } } spin_unlock_irqrestore(&il->sta_lock, flags_spin); if (!cleared) D_INFO("No active stations found to be cleared\n"); } EXPORT_SYMBOL(il_clear_ucode_stations); /** * il_restore_stations() - Restore driver known stations to device * * All stations considered active by driver, but not present in ucode, is * restored. * * Function sleeps. */ void il_restore_stations(struct il_priv *il) { struct il_addsta_cmd sta_cmd; struct il_link_quality_cmd lq; unsigned long flags_spin; int i; bool found = false; int ret; bool send_lq; if (!il_is_ready(il)) { D_INFO("Not ready yet, not restoring any stations.\n"); return; } D_ASSOC("Restoring all known stations ... 
start.\n"); spin_lock_irqsave(&il->sta_lock, flags_spin); for (i = 0; i < il->hw_params.max_stations; i++) { if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) && !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) { D_ASSOC("Restoring sta %pM\n", il->stations[i].sta.sta.addr); il->stations[i].sta.mode = 0; il->stations[i].used |= IL_STA_UCODE_INPROGRESS; found = true; } } for (i = 0; i < il->hw_params.max_stations; i++) { if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) { memcpy(&sta_cmd, &il->stations[i].sta, sizeof(struct il_addsta_cmd)); send_lq = false; if (il->stations[i].lq) { memcpy(&lq, il->stations[i].lq, sizeof(struct il_link_quality_cmd)); send_lq = true; } spin_unlock_irqrestore(&il->sta_lock, flags_spin); ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC); if (ret) { spin_lock_irqsave(&il->sta_lock, flags_spin); IL_ERR("Adding station %pM failed.\n", il->stations[i].sta.sta.addr); il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE; il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS; spin_unlock_irqrestore(&il->sta_lock, flags_spin); } /* * Rate scaling has already been initialized, send * current LQ command */ if (send_lq) il_send_lq_cmd(il, &lq, CMD_SYNC, true); spin_lock_irqsave(&il->sta_lock, flags_spin); il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS; } } spin_unlock_irqrestore(&il->sta_lock, flags_spin); if (!found) D_INFO("Restoring all known stations" " .... no stations to be restored.\n"); else D_INFO("Restoring all known stations" " .... complete.\n"); } EXPORT_SYMBOL(il_restore_stations); int il_get_free_ucode_key_idx(struct il_priv *il) { int i; for (i = 0; i < il->sta_key_max_num; i++) if (!test_and_set_bit(i, &il->ucode_key_table)) return i; return WEP_INVALID_OFFSET; } EXPORT_SYMBOL(il_get_free_ucode_key_idx); void il_dealloc_bcast_stations(struct il_priv *il) { unsigned long flags; int i; spin_lock_irqsave(&il->sta_lock, flags); for (i = 0; i < il->hw_params.max_stations; i++) { if (!(il->stations[i].used & IL_STA_BCAST)) continue; il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; il->num_stations--; BUG_ON(il->num_stations < 0); kfree(il->stations[i].lq); il->stations[i].lq = NULL; } spin_unlock_irqrestore(&il->sta_lock, flags); } EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations); #ifdef CONFIG_IWLEGACY_DEBUG static void il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) { int i; D_RATE("lq station id 0x%x\n", lq->sta_id); D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk, lq->general_params.dual_stream_ant_msk); for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags); } #else static inline void il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) { } #endif /** * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity * * It sometimes happens that when an HT rate has been in use and we * lose connectivity with the AP, mac80211 will first tell us that the * current channel is not HT anymore before removing the station. In such a * scenario the RXON flags will be updated to indicate we are not * communicating HT anymore, but the LQ command may still contain HT rates. * Test for this to prevent the driver from sending an LQ command between the time * the RXON flags are updated and the time the LQ command is updated.
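* The check below simply rejects an LQ table that still contains HT (MCS)
* rates while il->ht.enabled is false.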
*/ static bool il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq) { int i; if (il->ht.enabled) return true; D_INFO("Channel %u is not an HT channel\n", il->active.channel); for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) { D_INFO("idx %d of LQ expects HT channel\n", i); return false; } } return true; } /** * il_send_lq_cmd() - Send link quality command * @init: This command is sent as part of station initialization right * after station has been added. * * The link quality command is sent as the last step of station creation. * This is the special case in which init is set and we call a callback in * this case to clear the state indicating that station creation is in * progress. */ int il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq, u8 flags, bool init) { int ret = 0; unsigned long flags_spin; struct il_host_cmd cmd = { .id = C_TX_LINK_QUALITY_CMD, .len = sizeof(struct il_link_quality_cmd), .flags = flags, .data = lq, }; if (WARN_ON(lq->sta_id == IL_INVALID_STATION)) return -EINVAL; spin_lock_irqsave(&il->sta_lock, flags_spin); if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) { spin_unlock_irqrestore(&il->sta_lock, flags_spin); return -EINVAL; } spin_unlock_irqrestore(&il->sta_lock, flags_spin); il_dump_lq_cmd(il, lq); BUG_ON(init && (cmd.flags & CMD_ASYNC)); if (il_is_lq_table_valid(il, lq)) ret = il_send_cmd(il, &cmd); else ret = -EINVAL; if (cmd.flags & CMD_ASYNC) return ret; if (init) { D_INFO("init LQ command complete," " clearing sta addition status for sta %d\n", lq->sta_id); spin_lock_irqsave(&il->sta_lock, flags_spin); il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS; spin_unlock_irqrestore(&il->sta_lock, flags_spin); } return ret; } EXPORT_SYMBOL(il_send_lq_cmd); int il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct il_priv *il = hw->priv; struct il_station_priv_common *sta_common = (void *)sta->drv_priv; int ret; mutex_lock(&il->mutex); D_MAC80211("enter station %pM\n", sta->addr); ret = il_remove_station(il, sta_common->sta_id, sta->addr); if (ret) IL_ERR("Error removing station %pM\n", sta->addr); D_MAC80211("leave ret %d\n", ret); mutex_unlock(&il->mutex); return ret; } EXPORT_SYMBOL(il_mac_sta_remove); /************************** RX-FUNCTIONS ****************************/ /* * Rx theory of operation * * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), * each of which point to Receive Buffers to be filled by the NIC. These get * used not only for Rx frames, but for any command response or notification * from the NIC. The driver and NIC manage the Rx buffers by means * of idxes into the circular buffer. * * Rx Queue Indexes * The host/firmware share two idx registers for managing the Rx buffers. * * The READ idx maps to the first position that the firmware may be writing * to -- the driver can read up to (but not including) this position and get * good data. * The READ idx is managed by the firmware once the card is enabled. * * The WRITE idx maps to the last position the driver has read from -- the * position preceding WRITE is the last slot the firmware can place a packet. * * The queue is empty (no good data) if WRITE = READ - 1, and is full if * WRITE = READ. 
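* (Put differently: one slot is always left unused, so an N-entry ring
* holds at most N - 1 filled buffers; that is how full and empty are told
* apart.)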
* * During initialization, the host sets up the READ queue position to the first * IDX position, and WRITE to the last (READ - 1 wrapped) * * When the firmware places a packet in a buffer, it will advance the READ idx * and fire the RX interrupt. The driver can then query the READ idx and * process as many packets as possible, moving the WRITE idx forward as it * resets the Rx queue buffers with new memory. * * The management in the driver is as follows: * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled * to replenish the iwl->rxq->rx_free. * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the * iwl->rxq is replenished and the READ IDX is updated (updating the * 'processed' and 'read' driver idxes as well) * + A received packet is processed and handed to the kernel network stack, * detached from the iwl->rxq. The driver 'processed' idx is updated. * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ * IDX is not incremented and iwl->status(RX_STALLED) is set. If there * were enough free buffers and RX_STALLED is set it is cleared. * * * Driver sequence: * * il_rx_queue_alloc() Allocates rx_free * il_rx_replenish() Replenishes rx_free list from rx_used, and calls * il_rx_queue_restock * il_rx_queue_restock() Moves available buffers from rx_free into Rx * queue, updates firmware pointers, and updates * the WRITE idx. If insufficient rx_free buffers * are available, schedules il_rx_replenish * * -- enable interrupts -- * ISR - il_rx() Detach il_rx_bufs from pool up to the * READ IDX, detaching the SKB from the pool. * Moves the packet buffer from queue to rx_used. * Calls il_rx_queue_restock to refill any empty * slots. * ... * */ /** * il_rx_queue_space - Return number of free slots available in queue. 
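* Worked example, assuming the usual 256-entry ring: with read = 10 and
* write = 250, s = (10 - 250) + 256 = 16, and after the 2-slot safety
* margin 14 free slots are reported.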
*/ int il_rx_queue_space(const struct il_rx_queue *q) { int s = q->read - q->write; if (s <= 0) s += RX_QUEUE_SIZE; /* keep some buffer to not confuse full and empty queue */ s -= 2; if (s < 0) s = 0; return s; } EXPORT_SYMBOL(il_rx_queue_space); /** * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue */ void il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q) { unsigned long flags; u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg; u32 reg; spin_lock_irqsave(&q->lock, flags); if (q->need_update == 0) goto exit_unlock; /* If power-saving is in use, make sure device is awake */ if (test_bit(S_POWER_PMI, &il->status)) { reg = _il_rd(il, CSR_UCODE_DRV_GP1); if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n", reg); il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); goto exit_unlock; } q->write_actual = (q->write & ~0x7); il_wr(il, rx_wrt_ptr_reg, q->write_actual); /* Else device is assumed to be awake */ } else { /* Device expects a multiple of 8 */ q->write_actual = (q->write & ~0x7); il_wr(il, rx_wrt_ptr_reg, q->write_actual); } q->need_update = 0; exit_unlock: spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL(il_rx_queue_update_write_ptr); int il_rx_queue_alloc(struct il_priv *il) { struct il_rx_queue *rxq = &il->rxq; struct device *dev = &il->pci_dev->dev; int i; spin_lock_init(&rxq->lock); INIT_LIST_HEAD(&rxq->rx_free); INIT_LIST_HEAD(&rxq->rx_used); /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, GFP_KERNEL); if (!rxq->bd) goto err_bd; rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status), &rxq->rb_stts_dma, GFP_KERNEL); if (!rxq->rb_stts) goto err_rb; /* Fill the rx_used queue with _all_ of the Rx buffers */ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) list_add_tail(&rxq->pool[i].list, &rxq->rx_used); /* Set us so that we have processed and used all buffers, but have * not restocked the Rx queue with fresh buffers */ rxq->read = rxq->write = 0; rxq->write_actual = 0; rxq->free_count = 0; rxq->need_update = 0; return 0; err_rb: dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, rxq->bd_dma); err_bd: return -ENOMEM; } EXPORT_SYMBOL(il_rx_queue_alloc); void il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb) { struct il_rx_pkt *pkt = rxb_addr(rxb); struct il_spectrum_notification *report = &(pkt->u.spectrum_notif); if (!report->state) { D_11H("Spectrum Measure Notification: Start\n"); return; } memcpy(&il->measure_report, report, sizeof(*report)); il->measurement_status |= MEASUREMENT_READY; } EXPORT_SYMBOL(il_hdl_spectrum_measurement); /* * returns non-zero if packet should be dropped */ int il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr, u32 decrypt_res, struct ieee80211_rx_status *stats) { u16 fc = le16_to_cpu(hdr->frame_control); /* * All contexts have the same setting here due to it being * a module parameter, so OK to check any context. */ if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) return 0; if (!(fc & IEEE80211_FCTL_PROTECTED)) return 0; D_RX("decrypt_res:0x%x\n", decrypt_res); switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { case RX_RES_STATUS_SEC_TYPE_TKIP: /* The uCode has got a bad phase 1 Key, pushes the packet. * Decryption will be done in SW. 
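* When the TTAK is fine this case deliberately falls through to the
* WEP/CCMP handling below.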
*/ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == RX_RES_STATUS_BAD_KEY_TTAK) break; case RX_RES_STATUS_SEC_TYPE_WEP: if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == RX_RES_STATUS_BAD_ICV_MIC) { /* bad ICV, the packet is destroyed since the * decryption is inplace, drop it */ D_RX("Packet destroyed\n"); return -1; } case RX_RES_STATUS_SEC_TYPE_CCMP: if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == RX_RES_STATUS_DECRYPT_OK) { D_RX("hw decrypt successfully!!!\n"); stats->flag |= RX_FLAG_DECRYPTED; } break; default: break; } return 0; } EXPORT_SYMBOL(il_set_decrypted_flag); /** * il_txq_update_write_ptr - Send new write idx to hardware */ void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq) { u32 reg = 0; int txq_id = txq->q.id; if (txq->need_update == 0) return; /* if we're trying to save power */ if (test_bit(S_POWER_PMI, &il->status)) { /* wake up nic if it's powered down ... * uCode will wake up, and interrupt us again, so next * time we'll skip this part. */ reg = _il_rd(il, CSR_UCODE_DRV_GP1); if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n", txq_id, reg); il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); return; } il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); /* * else not in power-save mode, * uCode will never sleep when we're * trying to tx (during RFKILL, we're not trying to tx). */ } else _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); txq->need_update = 0; } EXPORT_SYMBOL(il_txq_update_write_ptr); /** * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's */ void il_tx_queue_unmap(struct il_priv *il, int txq_id) { struct il_tx_queue *txq = &il->txq[txq_id]; struct il_queue *q = &txq->q; if (q->n_bd == 0) return; while (q->write_ptr != q->read_ptr) { il->ops->txq_free_tfd(il, txq); q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); } } EXPORT_SYMBOL(il_tx_queue_unmap); /** * il_tx_queue_free - Deallocate DMA queue. * @txq: Transmit queue to deallocate. * * Empty queue by removing and destroying all BD's. * Free all buffers. * 0-fill, but do not free "txq" descriptor structure. 
*/ void il_tx_queue_free(struct il_priv *il, int txq_id) { struct il_tx_queue *txq = &il->txq[txq_id]; struct device *dev = &il->pci_dev->dev; int i; il_tx_queue_unmap(il, txq_id); /* De-alloc array of command/tx buffers */ for (i = 0; i < TFD_TX_CMD_SLOTS; i++) kfree(txq->cmd[i]); /* De-alloc circular buffer of TFDs */ if (txq->q.n_bd) dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, txq->tfds, txq->q.dma_addr); /* De-alloc array of per-TFD driver data */ kfree(txq->skbs); txq->skbs = NULL; /* deallocate arrays */ kfree(txq->cmd); kfree(txq->meta); txq->cmd = NULL; txq->meta = NULL; /* 0-fill queue descriptor structure */ memset(txq, 0, sizeof(*txq)); } EXPORT_SYMBOL(il_tx_queue_free); /** * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue */ void il_cmd_queue_unmap(struct il_priv *il) { struct il_tx_queue *txq = &il->txq[il->cmd_queue]; struct il_queue *q = &txq->q; int i; if (q->n_bd == 0) return; while (q->read_ptr != q->write_ptr) { i = il_get_cmd_idx(q, q->read_ptr, 0); if (txq->meta[i].flags & CMD_MAPPED) { pci_unmap_single(il->pci_dev, dma_unmap_addr(&txq->meta[i], mapping), dma_unmap_len(&txq->meta[i], len), PCI_DMA_BIDIRECTIONAL); txq->meta[i].flags = 0; } q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); } i = q->n_win; if (txq->meta[i].flags & CMD_MAPPED) { pci_unmap_single(il->pci_dev, dma_unmap_addr(&txq->meta[i], mapping), dma_unmap_len(&txq->meta[i], len), PCI_DMA_BIDIRECTIONAL); txq->meta[i].flags = 0; } } EXPORT_SYMBOL(il_cmd_queue_unmap); /** * il_cmd_queue_free - Deallocate DMA queue. * @txq: Transmit queue to deallocate. * * Empty queue by removing and destroying all BD's. * Free all buffers. * 0-fill, but do not free "txq" descriptor structure. */ void il_cmd_queue_free(struct il_priv *il) { struct il_tx_queue *txq = &il->txq[il->cmd_queue]; struct device *dev = &il->pci_dev->dev; int i; il_cmd_queue_unmap(il); /* De-alloc array of command/tx buffers */ for (i = 0; i <= TFD_CMD_SLOTS; i++) kfree(txq->cmd[i]); /* De-alloc circular buffer of TFDs */ if (txq->q.n_bd) dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, txq->tfds, txq->q.dma_addr); /* deallocate arrays */ kfree(txq->cmd); kfree(txq->meta); txq->cmd = NULL; txq->meta = NULL; /* 0-fill queue descriptor structure */ memset(txq, 0, sizeof(*txq)); } EXPORT_SYMBOL(il_cmd_queue_free); /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** * DMA services * * Theory of operation * * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer * of buffer descriptors, each of which points to one or more data buffers for * the device to read from or fill. Driver and device exchange status of each * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty * entries in each circular buffer, to protect against confusing empty and full * queue states. * * The device reads or writes the data in the queues via the device's several * DMA/FIFO channels. Each queue is mapped to a single DMA channel. * * For Tx queue, there are low mark and high mark limits. If, after queuing * the packet for Tx, free space become < low mark, Tx queue stopped. When * reclaiming packets (on 'tx done IRQ), if free space become > high mark, * Tx queue resumed. * * See more detailed info in 4965.h. 
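* The marks themselves are set up in il_queue_init() below: low mark is
* n_win / 4 (at least 4) and high mark is n_win / 8 (at least 2), e.g. 64
* and 32 for a 256-entry Tx window.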
***************************************************/ int il_queue_space(const struct il_queue *q) { int s = q->read_ptr - q->write_ptr; if (q->read_ptr > q->write_ptr) s -= q->n_bd; if (s <= 0) s += q->n_win; /* keep some reserve to not confuse empty and full situations */ s -= 2; if (s < 0) s = 0; return s; } EXPORT_SYMBOL(il_queue_space); /** * il_queue_init - Initialize queue's high/low-water and read/write idxes */ static int il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id) { /* * TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise * il_queue_inc_wrap and il_queue_dec_wrap are broken. */ BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); /* FIXME: remove q->n_bd */ q->n_bd = TFD_QUEUE_SIZE_MAX; q->n_win = slots; q->id = id; /* slots_must be power-of-two size, otherwise * il_get_cmd_idx is broken. */ BUG_ON(!is_power_of_2(slots)); q->low_mark = q->n_win / 4; if (q->low_mark < 4) q->low_mark = 4; q->high_mark = q->n_win / 8; if (q->high_mark < 2) q->high_mark = 2; q->write_ptr = q->read_ptr = 0; return 0; } /** * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue */ static int il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id) { struct device *dev = &il->pci_dev->dev; size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; /* Driver ilate data, only for Tx (not command) queues, * not shared with device. */ if (id != il->cmd_queue) { txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(struct skb *), GFP_KERNEL); if (!txq->skbs) { IL_ERR("Fail to alloc skbs\n"); goto error; } } else txq->skbs = NULL; /* Circular buffer of transmit frame descriptors (TFDs), * shared with device */ txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL); if (!txq->tfds) { IL_ERR("Fail to alloc TFDs\n"); goto error; } txq->q.id = id; return 0; error: kfree(txq->skbs); txq->skbs = NULL; return -ENOMEM; } /** * il_tx_queue_init - Allocate and initialize one tx/cmd queue */ int il_tx_queue_init(struct il_priv *il, u32 txq_id) { int i, len, ret; int slots, actual_slots; struct il_tx_queue *txq = &il->txq[txq_id]; /* * Alloc buffer array for commands (Tx or other types of commands). * For the command queue (#4/#9), allocate command space + one big * command for scan, since scan command is very huge; the system will * not have two scans at the same time, so only one is needed. * For normal Tx queues (all other queues), no super-size command * space is needed. */ if (txq_id == il->cmd_queue) { slots = TFD_CMD_SLOTS; actual_slots = slots + 1; } else { slots = TFD_TX_CMD_SLOTS; actual_slots = slots; } txq->meta = kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL); txq->cmd = kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL); if (!txq->meta || !txq->cmd) goto out_free_arrays; len = sizeof(struct il_device_cmd); for (i = 0; i < actual_slots; i++) { /* only happens for cmd queue */ if (i == slots) len = IL_MAX_CMD_SIZE; txq->cmd[i] = kmalloc(len, GFP_KERNEL); if (!txq->cmd[i]) goto err; } /* Alloc driver data array and TFD circular buffer */ ret = il_tx_queue_alloc(il, txq, txq_id); if (ret) goto err; txq->need_update = 0; /* * For the default queues 0-3, set up the swq_id * already -- all others need to get one later * (if they need one at all). 
/**
 * il_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int
il_tx_queue_init(struct il_priv *il, u32 txq_id)
{
        int i, len, ret;
        int slots, actual_slots;
        struct il_tx_queue *txq = &il->txq[txq_id];

        /*
         * Alloc buffer array for commands (Tx or other types of commands).
         * For the command queue (#4/#9), allocate command space + one big
         * command for scan, since the scan command is very large; the system
         * will not have two scans at the same time, so only one is needed.
         * For normal Tx queues (all other queues), no super-size command
         * space is needed.
         */
        if (txq_id == il->cmd_queue) {
                slots = TFD_CMD_SLOTS;
                actual_slots = slots + 1;
        } else {
                slots = TFD_TX_CMD_SLOTS;
                actual_slots = slots;
        }

        txq->meta =
            kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL);
        txq->cmd =
            kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL);

        if (!txq->meta || !txq->cmd)
                goto out_free_arrays;

        len = sizeof(struct il_device_cmd);
        for (i = 0; i < actual_slots; i++) {
                /* only happens for cmd queue */
                if (i == slots)
                        len = IL_MAX_CMD_SIZE;

                txq->cmd[i] = kmalloc(len, GFP_KERNEL);
                if (!txq->cmd[i])
                        goto err;
        }

        /* Alloc driver data array and TFD circular buffer */
        ret = il_tx_queue_alloc(il, txq, txq_id);
        if (ret)
                goto err;

        txq->need_update = 0;

        /*
         * For the default queues 0-3, set up the swq_id
         * already -- all others need to get one later
         * (if they need one at all).
         */
        if (txq_id < 4)
                il_set_swq_id(txq, txq_id, txq_id);

        /* Initialize queue's high/low-water marks, and head/tail idxes */
        il_queue_init(il, &txq->q, slots, txq_id);

        /* Tell device where to find queue */
        il->ops->txq_init(il, txq);

        return 0;
err:
        for (i = 0; i < actual_slots; i++)
                kfree(txq->cmd[i]);
out_free_arrays:
        kfree(txq->meta);
        kfree(txq->cmd);

        return -ENOMEM;
}
EXPORT_SYMBOL(il_tx_queue_init);

void
il_tx_queue_reset(struct il_priv *il, u32 txq_id)
{
        int slots, actual_slots;
        struct il_tx_queue *txq = &il->txq[txq_id];

        if (txq_id == il->cmd_queue) {
                slots = TFD_CMD_SLOTS;
                actual_slots = TFD_CMD_SLOTS + 1;
        } else {
                slots = TFD_TX_CMD_SLOTS;
                actual_slots = TFD_TX_CMD_SLOTS;
        }

        memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
        txq->need_update = 0;

        /* Initialize queue's high/low-water marks, and head/tail idxes */
        il_queue_init(il, &txq->q, slots, txq_id);

        /* Tell device where to find queue */
        il->ops->txq_init(il, txq);
}
EXPORT_SYMBOL(il_tx_queue_reset);

/*************** HOST COMMAND QUEUE FUNCTIONS *****/
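/*
 * Usage sketch (hypothetical caller, for illustration only -- the command
 * ID and payload type shown here are assumptions, not defined in this
 * file): a typical synchronous command reaches il_enqueue_hcmd() below as
 * something like
 *
 *        struct il_powertable_cmd power_cmd = { ... };
 *        struct il_host_cmd cmd = {
 *                .id = C_POWER_TBL,
 *                .len = sizeof(power_cmd),
 *                .data = &power_cmd,
 *        };
 *
 * il_enqueue_hcmd() copies cmd.data into one of the pre-allocated command
 * slots, maps the header + payload for DMA, attaches it to a TFD and bumps
 * the command queue's write pointer.
 */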
/**
 * il_enqueue_hcmd - enqueue a uCode command
 * @il: device private data pointer
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed.  On success, it returns the idx (> 0) of the command in the
 * command queue.
 */
int
il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
{
        struct il_tx_queue *txq = &il->txq[il->cmd_queue];
        struct il_queue *q = &txq->q;
        struct il_device_cmd *out_cmd;
        struct il_cmd_meta *out_meta;
        dma_addr_t phys_addr;
        unsigned long flags;
        int len;
        u32 idx;
        u16 fix_size;

        cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len);
        fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));

        /* If any of the command structures end up being larger than
         * the TFD_MAX_PAYLOAD_SIZE and it is sent as a 'small' command,
         * then we will need to increase the size of the TFD entries.
         * Also, check that the command buffer does not exceed the size
         * of device_cmd and max_cmd_size. */
        BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
               !(cmd->flags & CMD_SIZE_HUGE));
        BUG_ON(fix_size > IL_MAX_CMD_SIZE);

        if (il_is_rfkill(il) || il_is_ctkill(il)) {
                IL_WARN("Not sending command - %s KILL\n",
                        il_is_rfkill(il) ? "RF" : "CT");
                return -EIO;
        }

        spin_lock_irqsave(&il->hcmd_lock, flags);

        if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
                spin_unlock_irqrestore(&il->hcmd_lock, flags);

                IL_ERR("Restarting adapter due to command queue full\n");
                queue_work(il->workqueue, &il->restart);
                return -ENOSPC;
        }

        idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
        out_cmd = txq->cmd[idx];
        out_meta = &txq->meta[idx];

        if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
                spin_unlock_irqrestore(&il->hcmd_lock, flags);
                return -ENOSPC;
        }

        memset(out_meta, 0, sizeof(*out_meta));   /* re-initialize to NULL */
        out_meta->flags = cmd->flags | CMD_MAPPED;
        if (cmd->flags & CMD_WANT_SKB)
                out_meta->source = cmd;
        if (cmd->flags & CMD_ASYNC)
                out_meta->callback = cmd->callback;

        out_cmd->hdr.cmd = cmd->id;
        memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

        /* At this point, the out_cmd now has all of the incoming cmd
         * information */

        out_cmd->hdr.flags = 0;
        out_cmd->hdr.sequence =
            cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
        if (cmd->flags & CMD_SIZE_HUGE)
                out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
        len = sizeof(struct il_device_cmd);
        if (idx == TFD_CMD_SLOTS)
                len = IL_MAX_CMD_SIZE;

#ifdef CONFIG_IWLEGACY_DEBUG
        switch (out_cmd->hdr.cmd) {
        case C_TX_LINK_QUALITY_CMD:
        case C_SENSITIVITY:
                D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
                          "%d bytes at %d[%d]:%d\n",
                          il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
                          le16_to_cpu(out_cmd->hdr.sequence), fix_size,
                          q->write_ptr, idx, il->cmd_queue);
                break;
        default:
                D_HC("Sending command %s (#%x), seq: 0x%04X, "
                     "%d bytes at %d[%d]:%d\n",
                     il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
                     le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
                     idx, il->cmd_queue);
        }
#endif

        txq->need_update = 1;

        if (il->ops->txq_update_byte_cnt_tbl)
                /* Set up entry in queue's byte count circular buffer */
                il->ops->txq_update_byte_cnt_tbl(il, txq, 0);

        phys_addr =
            pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
                           PCI_DMA_BIDIRECTIONAL);
        dma_unmap_addr_set(out_meta, mapping, phys_addr);
        dma_unmap_len_set(out_meta, len, fix_size);

        il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1,
                                       U32_PAD(cmd->len));

        /* Increment and update queue's write idx */
        q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
        il_txq_update_write_ptr(il, txq);

        spin_unlock_irqrestore(&il->hcmd_lock, flags);
        return idx;
}

/**
 * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' idx, all entries between old and new 'R' idx
 * need to be reclaimed.  As a result, some free space becomes available.
 * If there is enough free space (> low mark), wake the stack that feeds us.
 */
static void
il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
{
        struct il_tx_queue *txq = &il->txq[txq_id];
        struct il_queue *q = &txq->q;
        int nfreed = 0;

        if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
                IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
                       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
                       q->write_ptr, q->read_ptr);
                return;
        }

        for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
             q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

                if (nfreed++ > 0) {
                        IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
                               q->write_ptr, q->read_ptr);
                        queue_work(il->workqueue, &il->restart);
                }

        }
}
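/*
 * Sequence bookkeeping (summary of the code above and below; the exact bit
 * layout lives in the commands header and is not repeated here):
 * il_enqueue_hcmd() stamps out_cmd->hdr.sequence with
 * QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(write_ptr), OR-ing in
 * SEQ_HUGE_FRAME for oversized commands.  When the device answers,
 * il_tx_cmd_complete() below recovers the originating queue and ring
 * index with SEQ_TO_QUEUE()/SEQ_TO_IDX() and passes the huge flag to
 * il_get_cmd_idx() to find the right command slot to unmap and reclaim.
 */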
/**
 * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void
il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
{
        struct il_rx_pkt *pkt = rxb_addr(rxb);
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
        int txq_id = SEQ_TO_QUEUE(sequence);
        int idx = SEQ_TO_IDX(sequence);
        int cmd_idx;
        bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
        struct il_device_cmd *cmd;
        struct il_cmd_meta *meta;
        struct il_tx_queue *txq = &il->txq[il->cmd_queue];
        unsigned long flags;

        /* If a Tx command is being handled and it isn't in the actual
         * command queue then a command routing bug has been introduced
         * in the queue management code. */
        if (WARN
            (txq_id != il->cmd_queue,
             "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
             txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
             il->txq[il->cmd_queue].q.write_ptr)) {
                il_print_hex_error(il, pkt, 32);
                return;
        }

        cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
        cmd = txq->cmd[cmd_idx];