tests/virtio-blk-test.c
/*
 * QTest testcase for VirtIO Block Device
 *
 * Copyright (c) 2014 SUSE LINUX Products GmbH
 * Copyright (c) 2014 Marc Marí
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "libqtest.h"
#include "libqos/libqos-pc.h"
#include "libqos/libqos-spapr.h"
#include "libqos/virtio.h"
#include "libqos/virtio-pci.h"
#include "libqos/virtio-mmio.h"
#include "libqos/malloc-generic.h"
#include "qapi/qmp/qdict.h"
#include "qemu/bswap.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_config.h"
#include "standard-headers/linux/virtio_ring.h"
#include "standard-headers/linux/virtio_blk.h"
#include "standard-headers/linux/virtio_pci.h"

/* TODO actually test the results and get rid of this */
#define qmp_discard_response(...) qobject_unref(qmp(__VA_ARGS__))

#define TEST_IMAGE_SIZE         (64 * 1024 * 1024)
#define QVIRTIO_BLK_TIMEOUT_US  (30 * 1000 * 1000)
#define PCI_SLOT_HP             0x06
#define PCI_SLOT                0x04
#define PCI_FN                  0x00

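/*
 * Parameters for the virtio-mmio variant of the tests (arm "virt"
 * machine, see arm_test_start()): transport base address, guest RAM
 * window and allocator page size used by mmio_basic().
 */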
#define MMIO_PAGE_SIZE          4096
#define MMIO_DEV_BASE_ADDR      0x0A003E00
#define MMIO_RAM_ADDR           0x40000000
#define MMIO_RAM_SIZE           0x20000000

typedef struct QVirtioBlkReq {
    uint32_t type;
    uint32_t ioprio;
    uint64_t sector;
    char *data;
    uint8_t status;
} QVirtioBlkReq;

static char *drive_create(void)
{
    int fd, ret;
    char *tmp_path = g_strdup("/tmp/qtest.XXXXXX");

    /* Create a temporary raw image */
    fd = mkstemp(tmp_path);
    g_assert_cmpint(fd, >=, 0);
    ret = ftruncate(fd, TEST_IMAGE_SIZE);
    g_assert_cmpint(ret, ==, 0);
    close(fd);

    return tmp_path;
}

static QOSState *pci_test_start(void)
{
    QOSState *qs;
    const char *arch = qtest_get_arch();
    char *tmp_path;
    const char *cmd = "-drive if=none,id=drive0,file=%s,format=raw "
                      "-drive if=none,id=drive1,file=null-co://,format=raw "
                      "-device virtio-blk-pci,id=drv0,drive=drive0,"
                      "addr=%x.%x";

    tmp_path = drive_create();

    if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) {
        qs = qtest_pc_boot(cmd, tmp_path, PCI_SLOT, PCI_FN);
    } else if (strcmp(arch, "ppc64") == 0) {
        qs = qtest_spapr_boot(cmd, tmp_path, PCI_SLOT, PCI_FN);
    } else {
        g_printerr("virtio-blk tests are only available on x86 or ppc64\n");
        exit(EXIT_FAILURE);
    }
    global_qtest = qs->qts;
    unlink(tmp_path);
    g_free(tmp_path);
    return qs;
}

static void arm_test_start(void)
{
    char *tmp_path;

    tmp_path = drive_create();

    global_qtest = qtest_initf("-machine virt "
                               "-drive if=none,id=drive0,file=%s,format=raw "
                               "-device virtio-blk-device,drive=drive0",
                               tmp_path);
    unlink(tmp_path);
    g_free(tmp_path);
}

static void test_end(void)
{
    qtest_end();
}

static QVirtioPCIDevice *virtio_blk_pci_init(QPCIBus *bus, int slot)
{
    QVirtioPCIDevice *dev;

    dev = qvirtio_pci_device_find_slot(bus, VIRTIO_ID_BLOCK, slot);
    g_assert(dev != NULL);
    g_assert_cmphex(dev->vdev.device_type, ==, VIRTIO_ID_BLOCK);
    g_assert_cmphex(dev->pdev->devfn, ==, ((slot << 3) | PCI_FN));

    qvirtio_pci_device_enable(dev);
    qvirtio_reset(&dev->vdev);
    qvirtio_set_acknowledge(&dev->vdev);
    qvirtio_set_driver(&dev->vdev);

    return dev;
}

static inline void virtio_blk_fix_request(QVirtioDevice *d, QVirtioBlkReq *req)
{
#ifdef HOST_WORDS_BIGENDIAN
    const bool host_is_big_endian = true;
#else
    const bool host_is_big_endian = false;
#endif

    if (qvirtio_is_big_endian(d) != host_is_big_endian) {
        req->type = bswap32(req->type);
        req->ioprio = bswap32(req->ioprio);
        req->sector = bswap64(req->sector);
    }
}

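/*
 * Lay the request out in guest memory: a 16-byte header (type, ioprio,
 * sector) followed by the data buffer and a single status byte.  With a
 * 512-byte payload the status byte therefore sits at offset 528, which
 * is the offset the tests below poll for completion.
 */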
static uint64_t virtio_blk_request(QGuestAllocator *alloc, QVirtioDevice *d,
                                   QVirtioBlkReq *req, uint64_t data_size)
{
    uint64_t addr;
    uint8_t status = 0xFF;

    g_assert_cmpuint(data_size % 512, ==, 0);
    addr = guest_alloc(alloc, sizeof(*req) + data_size);

    virtio_blk_fix_request(d, req);

    memwrite(addr, req, 16);
    memwrite(addr + 16, req->data, data_size);
    memwrite(addr + 16 + data_size, &status, sizeof(status));

    return addr;
}

static void test_basic(QVirtioDevice *dev, QGuestAllocator *alloc,
                       QVirtQueue *vq)
{
    QVirtioBlkReq req;
    uint64_t req_addr;
    uint64_t capacity;
    uint32_t features;
    uint32_t free_head;
    uint8_t status;
    char *data;

    capacity = qvirtio_config_readq(dev, 0);

    g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512);

    features = qvirtio_get_features(dev);
    features = features & ~(QVIRTIO_F_BAD_FEATURE |
                    (1u << VIRTIO_RING_F_INDIRECT_DESC) |
                    (1u << VIRTIO_RING_F_EVENT_IDX) |
                    (1u << VIRTIO_BLK_F_SCSI));
    qvirtio_set_features(dev, features);

    qvirtio_set_driver_ok(dev);

    /* Write and read with 3 descriptor layout */
    /* Write request */
    req.type = VIRTIO_BLK_T_OUT;
    req.ioprio = 1;
    req.sector = 0;
    req.data = g_malloc0(512);
    strcpy(req.data, "TEST");

    req_addr = virtio_blk_request(alloc, dev, &req, 512);

    g_free(req.data);

    free_head = qvirtqueue_add(vq, req_addr, 16, false, true);
    qvirtqueue_add(vq, req_addr + 16, 512, false, true);
    qvirtqueue_add(vq, req_addr + 528, 1, true, false);

    qvirtqueue_kick(dev, vq, free_head);

    qvirtio_wait_used_elem(dev, vq, free_head, NULL, QVIRTIO_BLK_TIMEOUT_US);
    status = readb(req_addr + 528);
    g_assert_cmpint(status, ==, 0);

    guest_free(alloc, req_addr);

    /* Read request */
    req.type = VIRTIO_BLK_T_IN;
    req.ioprio = 1;
    req.sector = 0;
    req.data = g_malloc0(512);

    req_addr = virtio_blk_request(alloc, dev, &req, 512);

    g_free(req.data);

    free_head = qvirtqueue_add(vq, req_addr, 16, false, true);
    qvirtqueue_add(vq, req_addr + 16, 512, true, true);
    qvirtqueue_add(vq, req_addr + 528, 1, true, false);

    qvirtqueue_kick(dev, vq, free_head);

    qvirtio_wait_used_elem(dev, vq, free_head, NULL, QVIRTIO_BLK_TIMEOUT_US);
    status = readb(req_addr + 528);
    g_assert_cmpint(status, ==, 0);

    data = g_malloc0(512);
    memread(req_addr + 16, data, 512);
    g_assert_cmpstr(data, ==, "TEST");
    g_free(data);

    guest_free(alloc, req_addr);

    if (features & (1u << VIRTIO_F_ANY_LAYOUT)) {
        /* Write and read with 2 descriptor layout */
        /* Write request */
        req.type = VIRTIO_BLK_T_OUT;
        req.ioprio = 1;
        req.sector = 1;
        req.data = g_malloc0(512);
        strcpy(req.data, "TEST");

        req_addr = virtio_blk_request(alloc, dev, &req, 512);

        g_free(req.data);

        free_head = qvirtqueue_add(vq, req_addr, 528, false, true);
        qvirtqueue_add(vq, req_addr + 528, 1, true, false);
        qvirtqueue_kick(dev, vq, free_head);

        qvirtio_wait_used_elem(dev, vq, free_head, NULL,
                               QVIRTIO_BLK_TIMEOUT_US);
        status = readb(req_addr + 528);
        g_assert_cmpint(status, ==, 0);

        guest_free(alloc, req_addr);

        /* Read request */
        req.type = VIRTIO_BLK_T_IN;
        req.ioprio = 1;
        req.sector = 1;
        req.data = g_malloc0(512);

        req_addr = virtio_blk_request(alloc, dev, &req, 512);

        g_free(req.data);

        free_head = qvirtqueue_add(vq, req_addr, 16, false, true);
        qvirtqueue_add(vq, req_addr + 16, 513, true, false);

        qvirtqueue_kick(dev, vq, free_head);

        qvirtio_wait_used_elem(dev, vq, free_head, NULL,
                               QVIRTIO_BLK_TIMEOUT_US);
        status = readb(req_addr + 528);
        g_assert_cmpint(status, ==, 0);

        data = g_malloc0(512);
        memread(req_addr + 16, data, 512);
        g_assert_cmpstr(data, ==, "TEST");
        g_free(data);

        guest_free(alloc, req_addr);
    }
}

static void pci_basic(void)
{
    QVirtioPCIDevice *dev;
    QOSState *qs;
    QVirtQueuePCI *vqpci;

    qs = pci_test_start();
    dev = virtio_blk_pci_init(qs->pcibus, PCI_SLOT);

    vqpci = (QVirtQueuePCI *)qvirtqueue_setup(&dev->vdev, qs->alloc, 0);

    test_basic(&dev->vdev, qs->alloc, &vqpci->vq);

    /* End test */
    qvirtqueue_cleanup(dev->vdev.bus, &vqpci->vq, qs->alloc);
    qvirtio_pci_device_disable(dev);
    qvirtio_pci_device_free(dev);
    qtest_shutdown(qs);
}

static void pci_indirect(void)
{
    QVirtioPCIDevice *dev;
    QVirtQueuePCI *vqpci;
    QOSState *qs;
    QVirtioBlkReq req;
    QVRingIndirectDesc *indirect;
    uint64_t req_addr;
    uint64_t capacity;
    uint32_t features;
    uint32_t free_head;
    uint8_t status;
    char *data;

    qs = pci_test_start();

    dev = virtio_blk_pci_init(qs->pcibus, PCI_SLOT);

    capacity = qvirtio_config_readq(&dev->vdev, 0);
    g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512);

    features = qvirtio_get_features(&dev->vdev);
    g_assert_cmphex(features & (1u << VIRTIO_RING_F_INDIRECT_DESC), !=, 0);
    features = features & ~(QVIRTIO_F_BAD_FEATURE |
                            (1u << VIRTIO_RING_F_EVENT_IDX) |
                            (1u << VIRTIO_BLK_F_SCSI));
    qvirtio_set_features(&dev->vdev, features);

    vqpci = (QVirtQueuePCI *)qvirtqueue_setup(&dev->vdev, qs->alloc, 0);
    qvirtio_set_driver_ok(&dev->vdev);

    /* Write request */
    req.type = VIRTIO_BLK_T_OUT;
    req.ioprio = 1;
    req.sector = 0;
    req.data = g_malloc0(512);
    strcpy(req.data, "TEST");

    req_addr = virtio_blk_request(qs->alloc, &dev->vdev, &req, 512);

    g_free(req.data);

    indirect = qvring_indirect_desc_setup(&dev->vdev, qs->alloc, 2);
    qvring_indirect_desc_add(indirect, req_addr, 528, false);
    qvring_indirect_desc_add(indirect, req_addr + 528, 1, true);
    free_head = qvirtqueue_add_indirect(&vqpci->vq, indirect);
    qvirtqueue_kick(&dev->vdev, &vqpci->vq, free_head);

    qvirtio_wait_used_elem(&dev->vdev, &vqpci->vq, free_head, NULL,
                           QVIRTIO_BLK_TIMEOUT_US);
    status = readb(req_addr + 528);
    g_assert_cmpint(status, ==, 0);

    g_free(indirect);
    guest_free(qs->alloc, req_addr);

    /* Read request */
    req.type = VIRTIO_BLK_T_IN;
    req.ioprio = 1;
    req.sector = 0;
    req.data = g_malloc0(512);
    strcpy(req.data, "TEST");

    req_addr = virtio_blk_request(qs->alloc, &dev->vdev, &req, 512);

    g_free(req.data);

    indirect = qvring_indirect_desc_setup(&dev->vdev, qs->alloc, 2);
    qvring_indirect_desc_add(indirect, req_addr, 16, false);
    qvring_indirect_desc_add(indirect, req_addr + 16, 513, true);
    free_head = qvirtqueue_add_indirect(&vqpci->vq, indirect);
    qvirtqueue_kick(&dev->vdev, &vqpci->vq, free_head);

    qvirtio_wait_used_elem(&dev->vdev, &vqpci->vq, free_head, NULL,
                           QVIRTIO_BLK_TIMEOUT_US);
    status = readb(req_addr + 528);
    g_assert_cmpint(status, ==, 0);

    data = g_malloc0(512);
    memread(req_addr + 16, data, 512);
    g_assert_cmpstr(data, ==, "TEST");
    g_free(data);

    g_free(indirect);
    guest_free(qs->alloc, req_addr);

    /* End test */
    qvirtqueue_cleanup(dev->vdev.bus, &vqpci->vq, qs->alloc);
    qvirtio_pci_device_disable(dev);
    qvirtio_pci_device_free(dev);
    qtest_shutdown(qs);
}

static void pci_config(void)
{
    QVirtioPCIDevice *dev;
    QOSState *qs;
    int n_size = TEST_IMAGE_SIZE / 2;
    uint64_t capacity;

    qs = pci_test_start();

    dev = virtio_blk_pci_init(qs->pcibus, PCI_SLOT);

    capacity = qvirtio_config_readq(&dev->vdev, 0);
    g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512);

    qvirtio_set_driver_ok(&dev->vdev);

    qmp_discard_response("{ 'execute': 'block_resize', "
                         " 'arguments': { 'device': 'drive0', "
                         " 'size': %d } }", n_size);
    qvirtio_wait_config_isr(&dev->vdev, QVIRTIO_BLK_TIMEOUT_US);

    capacity = qvirtio_config_readq(&dev->vdev, 0);
    g_assert_cmpint(capacity, ==, n_size / 512);

    qvirtio_pci_device_disable(dev);
    qvirtio_pci_device_free(dev);

    qtest_shutdown(qs);
}

static void pci_msix(void)
{
    QVirtioPCIDevice *dev;
    QOSState *qs;
    QVirtQueuePCI *vqpci;
    QVirtioBlkReq req;
    int n_size = TEST_IMAGE_SIZE / 2;
    uint64_t req_addr;
    uint64_t capacity;
    uint32_t features;
    uint32_t free_head;
    uint8_t status;
    char *data;

    qs = pci_test_start();

    dev = virtio_blk_pci_init(qs->pcibus, PCI_SLOT);
    qpci_msix_enable(dev->pdev);

    qvirtio_pci_set_msix_configuration_vector(dev, qs->alloc, 0);

    capacity = qvirtio_config_readq(&dev->vdev, 0);
    g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512);

    features = qvirtio_get_features(&dev->vdev);
    features = features & ~(QVIRTIO_F_BAD_FEATURE |
                            (1u << VIRTIO_RING_F_INDIRECT_DESC) |
                            (1u << VIRTIO_RING_F_EVENT_IDX) |
                            (1u << VIRTIO_BLK_F_SCSI));
    qvirtio_set_features(&dev->vdev, features);

    vqpci = (QVirtQueuePCI *)qvirtqueue_setup(&dev->vdev, qs->alloc, 0);
    qvirtqueue_pci_msix_setup(dev, vqpci, qs->alloc, 1);

    qvirtio_set_driver_ok(&dev->vdev);

    qmp_discard_response("{ 'execute': 'block_resize', "
                         " 'arguments': { 'device': 'drive0', "
                         " 'size': %d } }", n_size);

    qvirtio_wait_config_isr(&dev->vdev, QVIRTIO_BLK_TIMEOUT_US);

    capacity = qvirtio_config_readq(&dev->vdev, 0);
    g_assert_cmpint(capacity, ==, n_size / 512);

    /* Write request */
    req.type = VIRTIO_BLK_T_OUT;
    req.ioprio = 1;
    req.sector = 0;
    req.data = g_malloc0(512);
    strcpy(req.data, "TEST");

    req_addr = virtio_blk_request(qs->alloc, &dev->vdev, &req, 512);

    g_free(req.data);

    free_head = qvirtqueue_add(&vqpci->vq, req_addr, 16, false, true);
    qvirtqueue_add(&vqpci->vq, req_addr + 16, 512, false, true);
    qvirtqueue_add(&vqpci->vq, req_addr + 528, 1, true, false);
    qvirtqueue_kick(&dev->vdev, &vqpci->vq, free_head);

    qvirtio_wait_used_elem(&dev->vdev, &vqpci->vq, free_head, NULL,
                           QVIRTIO_BLK_TIMEOUT_US);

    status = readb(req_addr + 528);
    g_assert_cmpint(status, ==, 0);

    guest_free(qs->alloc, req_addr);

    /* Read request */
    req.type = VIRTIO_BLK_T_IN;
    req.ioprio = 1;
    req.sector = 0;
    req.data = g_malloc0(512);

    req_addr = virtio_blk_request(qs->alloc, &dev->vdev, &req, 512);

    g_free(req.data);

    free_head = qvirtqueue_add(&vqpci->vq, req_addr, 16, false, true);
    qvirtqueue_add(&vqpci->vq, req_addr + 16, 512, true, true);
    qvirtqueue_add(&vqpci->vq, req_addr + 528, 1, true, false);

    qvirtqueue_kick(&dev->vdev, &vqpci->vq, free_head);


    qvirtio_wait_used_elem(&dev->vdev, &vqpci->vq, free_head, NULL,
                           QVIRTIO_BLK_TIMEOUT_US);

    status = readb(req_addr + 528);
    g_assert_cmpint(status, ==, 0);

    data = g_malloc0(512);
    memread(req_addr + 16, data, 512);
    g_assert_cmpstr(data, ==, "TEST");
    g_free(data);

    guest_free(qs->alloc, req_addr);

    /* End test */
    qvirtqueue_cleanup(dev->vdev.bus, &vqpci->vq, qs->alloc);
    qpci_msix_disable(dev->pdev);
    qvirtio_pci_device_disable(dev);
    qvirtio_pci_device_free(dev);
    qtest_shutdown(qs);
}

static void pci_idx(void)
{
    QVirtioPCIDevice *dev;
    QOSState *qs;
    QVirtQueuePCI *vqpci;
    QVirtioBlkReq req;
    uint64_t req_addr;
    uint64_t capacity;
    uint32_t features;
    uint32_t free_head;
    uint32_t write_head;
    uint32_t desc_idx;
    uint8_t status;
    char *data;

    qs = pci_test_start();

    dev = virtio_blk_pci_init(qs->pcibus, PCI_SLOT);
    qpci_msix_enable(dev->pdev);

    qvirtio_pci_set_msix_configuration_vector(dev, qs->alloc, 0);

    capacity = qvirtio_config_readq(&dev->vdev, 0);
    g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512);

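    /*
     * Keep VIRTIO_RING_F_EVENT_IDX negotiated so that
     * qvirtqueue_set_used_event() below can suppress the interrupt for
     * the second request.
     */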
    features = qvirtio_get_features(&dev->vdev);
    features = features & ~(QVIRTIO_F_BAD_FEATURE |
                            (1u << VIRTIO_RING_F_INDIRECT_DESC) |
                            (1u << VIRTIO_F_NOTIFY_ON_EMPTY) |
                            (1u << VIRTIO_BLK_F_SCSI));
    qvirtio_set_features(&dev->vdev, features);

    vqpci = (QVirtQueuePCI *)qvirtqueue_setup(&dev->vdev, qs->alloc, 0);
    qvirtqueue_pci_msix_setup(dev, vqpci, qs->alloc, 1);

    qvirtio_set_driver_ok(&dev->vdev);

    /* Write request */
    req.type = VIRTIO_BLK_T_OUT;
    req.ioprio = 1;
    req.sector = 0;
    req.data = g_malloc0(512);
    strcpy(req.data, "TEST");

    req_addr = virtio_blk_request(qs->alloc, &dev->vdev, &req, 512);

    g_free(req.data);

    free_head = qvirtqueue_add(&vqpci->vq, req_addr, 16, false, true);
    qvirtqueue_add(&vqpci->vq, req_addr + 16, 512, false, true);
    qvirtqueue_add(&vqpci->vq, req_addr + 528, 1, true, false);
    qvirtqueue_kick(&dev->vdev, &vqpci->vq, free_head);

    qvirtio_wait_used_elem(&dev->vdev, &vqpci->vq, free_head, NULL,
                           QVIRTIO_BLK_TIMEOUT_US);

    /* Write request */
    req.type = VIRTIO_BLK_T_OUT;
    req.ioprio = 1;
    req.sector = 1;
    req.data = g_malloc0(512);
    strcpy(req.data, "TEST");

    req_addr = virtio_blk_request(qs->alloc, &dev->vdev, &req, 512);

    g_free(req.data);

    /* Notify after processing the third request */
    qvirtqueue_set_used_event(&vqpci->vq, 2);
    free_head = qvirtqueue_add(&vqpci->vq, req_addr, 16, false, true);
    qvirtqueue_add(&vqpci->vq, req_addr + 16, 512, false, true);
    qvirtqueue_add(&vqpci->vq, req_addr + 528, 1, true, false);
    qvirtqueue_kick(&dev->vdev, &vqpci->vq, free_head);
    write_head = free_head;

    /* No notification expected */
    status = qvirtio_wait_status_byte_no_isr(&dev->vdev,
                                             &vqpci->vq, req_addr + 528,
                                             QVIRTIO_BLK_TIMEOUT_US);
    g_assert_cmpint(status, ==, 0);

    guest_free(qs->alloc, req_addr);

    /* Read request */
    req.type = VIRTIO_BLK_T_IN;
    req.ioprio = 1;
    req.sector = 1;
    req.data = g_malloc0(512);

    req_addr = virtio_blk_request(qs->alloc, &dev->vdev, &req, 512);

    g_free(req.data);

    free_head = qvirtqueue_add(&vqpci->vq, req_addr, 16, false, true);
    qvirtqueue_add(&vqpci->vq, req_addr + 16, 512, true, true);
    qvirtqueue_add(&vqpci->vq, req_addr + 528, 1, true, false);

    qvirtqueue_kick(&dev->vdev, &vqpci->vq, free_head);

    /* We get just one notification for both requests */
    qvirtio_wait_used_elem(&dev->vdev, &vqpci->vq, write_head, NULL,
                           QVIRTIO_BLK_TIMEOUT_US);
    g_assert(qvirtqueue_get_buf(&vqpci->vq, &desc_idx, NULL));
    g_assert_cmpint(desc_idx, ==, free_head);

    status = readb(req_addr + 528);
    g_assert_cmpint(status, ==, 0);

    data = g_malloc0(512);
    memread(req_addr + 16, data, 512);
    g_assert_cmpstr(data, ==, "TEST");
    g_free(data);

    guest_free(qs->alloc, req_addr);

    /* End test */
    qvirtqueue_cleanup(dev->vdev.bus, &vqpci->vq, qs->alloc);
    qpci_msix_disable(dev->pdev);
    qvirtio_pci_device_disable(dev);
    qvirtio_pci_device_free(dev);
    qtest_shutdown(qs);
}

static void pci_hotplug(void)
{
    QVirtioPCIDevice *dev;
    QOSState *qs;
    const char *arch = qtest_get_arch();

    qs = pci_test_start();

    /* plug secondary disk */
    qtest_qmp_device_add("virtio-blk-pci", "drv1",
                         "{'addr': %s, 'drive': 'drive1'}",
                         stringify(PCI_SLOT_HP));

    dev = virtio_blk_pci_init(qs->pcibus, PCI_SLOT_HP);
    g_assert(dev);
    qvirtio_pci_device_disable(dev);
    qvirtio_pci_device_free(dev);

    /* unplug secondary disk */
    if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) {
        qpci_unplug_acpi_device_test("drv1", PCI_SLOT_HP);
    }
    qtest_shutdown(qs);
}

/*
 * Check that setting the vring addr on a non-existent virtqueue does
 * not crash.
 */
static void test_nonexistent_virtqueue(void)
{
    QPCIBar bar0;
    QOSState *qs;
    QPCIDevice *dev;

    qs = pci_test_start();
    dev = qpci_device_find(qs->pcibus, QPCI_DEVFN(4, 0));
    g_assert(dev != NULL);

    qpci_device_enable(dev);
    bar0 = qpci_iomap(dev, 0, NULL);

    qpci_io_writeb(dev, bar0, VIRTIO_PCI_QUEUE_SEL, 2);
    qpci_io_writel(dev, bar0, VIRTIO_PCI_QUEUE_PFN, 1);

    g_free(dev);
    qtest_shutdown(qs);
}

static void mmio_basic(void)
{
    QVirtioMMIODevice *dev;
    QVirtQueue *vq;
    QGuestAllocator *alloc;
    int n_size = TEST_IMAGE_SIZE / 2;
    uint64_t capacity;

    arm_test_start();

    dev = qvirtio_mmio_init_device(MMIO_DEV_BASE_ADDR, MMIO_PAGE_SIZE);
    g_assert(dev != NULL);
    g_assert_cmphex(dev->vdev.device_type, ==, VIRTIO_ID_BLOCK);

    qvirtio_reset(&dev->vdev);
    qvirtio_set_acknowledge(&dev->vdev);
    qvirtio_set_driver(&dev->vdev);

    alloc = generic_alloc_init(MMIO_RAM_ADDR, MMIO_RAM_SIZE, MMIO_PAGE_SIZE);
    vq = qvirtqueue_setup(&dev->vdev, alloc, 0);

    test_basic(&dev->vdev, alloc, vq);

    qmp_discard_response("{ 'execute': 'block_resize', "
                         " 'arguments': { 'device': 'drive0', "
                         " 'size': %d } }", n_size);

    qvirtio_wait_queue_isr(&dev->vdev, vq, QVIRTIO_BLK_TIMEOUT_US);

    capacity = qvirtio_config_readq(&dev->vdev, 0);
    g_assert_cmpint(capacity, ==, n_size / 512);

    /* End test */
    qvirtqueue_cleanup(dev->vdev.bus, vq, alloc);
    g_free(dev);
    generic_alloc_uninit(alloc);
    test_end();
}

int main(int argc, char **argv)
{
    const char *arch = qtest_get_arch();

    g_test_init(&argc, &argv, NULL);

    if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0 ||
        strcmp(arch, "ppc64") == 0) {
        qtest_add_func("/virtio/blk/pci/basic", pci_basic);
        qtest_add_func("/virtio/blk/pci/indirect", pci_indirect);
        qtest_add_func("/virtio/blk/pci/config", pci_config);
        qtest_add_func("/virtio/blk/pci/nxvirtq", test_nonexistent_virtqueue);
        if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) {
            qtest_add_func("/virtio/blk/pci/msix", pci_msix);
            qtest_add_func("/virtio/blk/pci/idx", pci_idx);
        }
        qtest_add_func("/virtio/blk/pci/hotplug", pci_hotplug);
    } else if (strcmp(arch, "arm") == 0) {
        qtest_add_func("/virtio/blk/mmio/basic", mmio_basic);
    }

    return g_test_run();
}
-rw-r--r--drivers/staging/batman-adv/bat_sysfs.c484
-rw-r--r--drivers/staging/batman-adv/bat_sysfs.h29
-rw-r--r--drivers/staging/batman-adv/bitarray.c76
-rw-r--r--drivers/staging/batman-adv/bitarray.h2
-rw-r--r--drivers/staging/batman-adv/device.c39
-rw-r--r--drivers/staging/batman-adv/device.h2
-rw-r--r--drivers/staging/batman-adv/hard-interface.c530
-rw-r--r--drivers/staging/batman-adv/hard-interface.h20
-rw-r--r--drivers/staging/batman-adv/hash.c2
-rw-r--r--drivers/staging/batman-adv/hash.h2
-rw-r--r--drivers/staging/batman-adv/main.c63
-rw-r--r--drivers/staging/batman-adv/main.h23
-rw-r--r--drivers/staging/batman-adv/originator.c295
-rw-r--r--drivers/staging/batman-adv/originator.h7
-rw-r--r--drivers/staging/batman-adv/packet.h2
-rw-r--r--drivers/staging/batman-adv/proc.c670
-rw-r--r--drivers/staging/batman-adv/proc.h40
-rw-r--r--drivers/staging/batman-adv/ring_buffer.c2
-rw-r--r--drivers/staging/batman-adv/ring_buffer.h2
-rw-r--r--drivers/staging/batman-adv/routing.c221
-rw-r--r--drivers/staging/batman-adv/routing.h2
-rw-r--r--drivers/staging/batman-adv/send.c169
-rw-r--r--drivers/staging/batman-adv/send.h6
-rw-r--r--drivers/staging/batman-adv/soft-interface.c30
-rw-r--r--drivers/staging/batman-adv/soft-interface.h2
-rw-r--r--drivers/staging/batman-adv/translation-table.c92
-rw-r--r--drivers/staging/batman-adv/translation-table.h8
-rw-r--r--drivers/staging/batman-adv/types.h54
-rw-r--r--drivers/staging/batman-adv/vis.c353
-rw-r--r--drivers/staging/batman-adv/vis.h19
-rw-r--r--drivers/staging/comedi/Kconfig1323
-rw-r--r--drivers/staging/comedi/Makefile1
-rw-r--r--drivers/staging/comedi/comedi.h202
-rw-r--r--drivers/staging/comedi/comedi_compat32.c3
-rw-r--r--drivers/staging/comedi/comedi_fops.c280
-rw-r--r--drivers/staging/comedi/comedi_fops.h1
-rw-r--r--drivers/staging/comedi/comedi_ksyms.c69
-rw-r--r--drivers/staging/comedi/comedidev.h34
-rw-r--r--drivers/staging/comedi/comedilib.h170
-rw-r--r--drivers/staging/comedi/drivers.c112
-rw-r--r--drivers/staging/comedi/drivers/8253.h3
-rw-r--r--drivers/staging/comedi/drivers/8255.c16
-rw-r--r--drivers/staging/comedi/drivers/8255.h22
-rw-r--r--drivers/staging/comedi/drivers/Makefile245
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_amcc_s5933.h6
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c27
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/amcc_s5933_58.h4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c6
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.h16
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1032.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c94
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci2032.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3xxx.c191
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_035.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1032.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1500.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1516.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_16xx.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1710.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2016.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2032.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2200.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3001.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3120.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3200.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3300.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c2
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c10
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7230.c206
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c41
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c857
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c27
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c153
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c216
-rw-r--r--drivers/staging/comedi/drivers/aio_aio12_8.c4
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200.c41
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c116
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c132
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c16
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c94
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c2
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c3
-rw-r--r--drivers/staging/comedi/drivers/comedi_bond.c13
-rw-r--r--drivers/staging/comedi/drivers/comedi_parport.c10
-rw-r--r--drivers/staging/comedi/drivers/das08.c156
-rw-r--r--drivers/staging/comedi/drivers/das08.h2
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c5
-rw-r--r--drivers/staging/comedi/drivers/das16.c159
-rw-r--r--drivers/staging/comedi/drivers/das1800.c10
-rw-r--r--drivers/staging/comedi/drivers/dt2801.c2
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c191
-rw-r--r--drivers/staging/comedi/drivers/dt2814.c38
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c225
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c21
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.h3
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c1
-rw-r--r--drivers/staging/comedi/drivers/mite.c2
-rw-r--r--drivers/staging/comedi/drivers/mite.h2
-rw-r--r--drivers/staging/comedi/drivers/mpc624.c185
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c7
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c47
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_at_ao.c6
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c13
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c7
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c7
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c25
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c107
-rw-r--r--drivers/staging/comedi/drivers/plx9080.h4
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c42
-rw-r--r--drivers/staging/comedi/drivers/skel.c21
-rw-r--r--drivers/staging/comedi/drivers/ssv_dnp.c86
-rw-r--r--drivers/staging/comedi/drivers/unioxx5.c2
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c16
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c49
-rw-r--r--drivers/staging/comedi/internal.h12
-rw-r--r--drivers/staging/comedi/kcomedilib/Makefile9
-rw-r--r--drivers/staging/comedi/kcomedilib/data.c92
-rw-r--r--drivers/staging/comedi/kcomedilib/dio.c95
-rw-r--r--drivers/staging/comedi/kcomedilib/get.c293
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c523
-rw-r--r--drivers/staging/comedi/kcomedilib/ksyms.c146
-rw-r--r--drivers/staging/comedi/pci_ids.h31
-rw-r--r--drivers/staging/comedi/proc.c23
-rw-r--r--drivers/staging/comedi/range.c36
-rw-r--r--drivers/staging/comedi/wrapper.h25
-rw-r--r--drivers/staging/crystalhd/TODO1
-rw-r--r--drivers/staging/crystalhd/bc_dts_defs.h72
-rw-r--r--drivers/staging/crystalhd/bc_dts_glob_lnx.h119
-rw-r--r--drivers/staging/crystalhd/bc_dts_types.h25
-rw-r--r--drivers/staging/crystalhd/bcm_70012_regs.h24
-rw-r--r--drivers/staging/crystalhd/crystalhd_cmds.c178
-rw-r--r--drivers/staging/crystalhd/crystalhd_cmds.h21
-rw-r--r--drivers/staging/crystalhd/crystalhd_fw_if.h60
-rw-r--r--drivers/staging/crystalhd/crystalhd_hw.c212
-rw-r--r--drivers/staging/crystalhd/crystalhd_hw.h121
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.c76
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.h10
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.c100
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.h55
-rw-r--r--drivers/staging/cx25821/cx25821-audio-upstream.c6
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream-ch2.c6
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream.c4
-rw-r--r--drivers/staging/cxt1e1/Kconfig22
-rw-r--r--drivers/staging/cxt1e1/Makefile19
-rw-r--r--drivers/staging/cxt1e1/comet.c568
-rw-r--r--drivers/staging/cxt1e1/comet.h366
-rw-r--r--drivers/staging/cxt1e1/comet_tables.c561
-rw-r--r--drivers/staging/cxt1e1/comet_tables.h85
-rw-r--r--drivers/staging/cxt1e1/functions.c368
-rw-r--r--drivers/staging/cxt1e1/hwprobe.c402
-rw-r--r--drivers/staging/cxt1e1/libsbew.h581
-rw-r--r--drivers/staging/cxt1e1/linux.c1356
-rw-r--r--drivers/staging/cxt1e1/musycc.c2185
-rw-r--r--drivers/staging/cxt1e1/musycc.h460
-rw-r--r--drivers/staging/cxt1e1/ossiRelease.c39
-rw-r--r--drivers/staging/cxt1e1/pmc93x6_eeprom.c561
-rw-r--r--drivers/staging/cxt1e1/pmc93x6_eeprom.h60
-rw-r--r--drivers/staging/cxt1e1/pmcc4.h155
-rw-r--r--drivers/staging/cxt1e1/pmcc4_cpld.h124
-rw-r--r--drivers/staging/cxt1e1/pmcc4_defs.h82
-rw-r--r--drivers/staging/cxt1e1/pmcc4_drv.c1860
-rw-r--r--drivers/staging/cxt1e1/pmcc4_ioctls.h81
-rw-r--r--drivers/staging/cxt1e1/pmcc4_private.h296
-rw-r--r--drivers/staging/cxt1e1/pmcc4_sysdep.h62
-rw-r--r--drivers/staging/cxt1e1/sbe_bid.h61
-rw-r--r--drivers/staging/cxt1e1/sbe_promformat.h157
-rw-r--r--drivers/staging/cxt1e1/sbecom_inline_linux.h310
-rw-r--r--drivers/staging/cxt1e1/sbecrc.c137
-rw-r--r--drivers/staging/cxt1e1/sbeid.c217
-rw-r--r--drivers/staging/cxt1e1/sbeproc.c358
-rw-r--r--drivers/staging/cxt1e1/sbeproc.h52
-rw-r--r--drivers/staging/cxt1e1/sbew_ioc.h136
-rw-r--r--drivers/staging/dream/Kconfig11
-rw-r--r--drivers/staging/dream/Makefile3
-rw-r--r--drivers/staging/dream/TODO1
-rw-r--r--drivers/staging/dream/camera/msm_vfe8x_proc.c2
-rw-r--r--drivers/staging/dream/pmem.c27
-rw-r--r--drivers/staging/dream/qdsp5/audio_out.c9
-rw-r--r--drivers/staging/dream/smd/Kconfig26
-rw-r--r--drivers/staging/dream/smd/Makefile7
-rw-r--r--drivers/staging/dream/smd/rpc_server_dog_keepalive.c68
-rw-r--r--drivers/staging/dream/smd/rpc_server_time_remote.c77
-rw-r--r--drivers/staging/dream/smd/smd.c1330
-rw-r--r--drivers/staging/dream/smd/smd_private.h171
-rw-r--r--drivers/staging/dream/smd/smd_qmi.c855
-rw-r--r--drivers/staging/dream/smd/smd_rpcrouter.c1261
-rw-r--r--drivers/staging/dream/smd/smd_rpcrouter.h193
-rw-r--r--drivers/staging/dream/smd/smd_rpcrouter_device.c377
-rw-r--r--drivers/staging/dream/smd/smd_rpcrouter_servers.c230
-rw-r--r--drivers/staging/dream/smd/smd_tty.c208
-rw-r--r--drivers/staging/dream/synaptics_i2c_rmi.c26
-rw-r--r--drivers/staging/dt3155/allocator.c20
-rw-r--r--drivers/staging/dt3155/allocator.h4
-rw-r--r--drivers/staging/dt3155/dt3155.h44
-rw-r--r--drivers/staging/dt3155/dt3155_drv.c380
-rw-r--r--drivers/staging/dt3155/dt3155_io.c24
-rw-r--r--drivers/staging/dt3155/dt3155_isr.c297
-rw-r--r--drivers/staging/dt3155/dt3155_isr.h2
-rw-r--r--drivers/staging/dt3155v4l/Kconfig20
-rw-r--r--drivers/staging/dt3155v4l/Makefile1
-rw-r--r--drivers/staging/dt3155v4l/dt3155v4l.c1200
-rw-r--r--drivers/staging/dt3155v4l/dt3155v4l.h224
-rw-r--r--drivers/staging/echo/echo.c2
-rw-r--r--drivers/staging/et131x/et1310_address_map.h7
-rw-r--r--drivers/staging/et131x/et1310_eeprom.c8
-rw-r--r--drivers/staging/et131x/et1310_phy.c2
-rw-r--r--drivers/staging/et131x/et1310_rx.c55
-rw-r--r--drivers/staging/et131x/et1310_rx.h5
-rw-r--r--drivers/staging/et131x/et1310_tx.c2
-rw-r--r--drivers/staging/et131x/et131x_initpci.c12
-rw-r--r--drivers/staging/et131x/et131x_isr.c6
-rw-r--r--drivers/staging/et131x/et131x_netdev.c14
-rw-r--r--drivers/staging/frontier/alphatrack.c34
-rw-r--r--drivers/staging/frontier/tranzport.c36
-rw-r--r--drivers/staging/go7007/go7007-fw.c12
-rw-r--r--drivers/staging/go7007/go7007-usb.c3
-rw-r--r--drivers/staging/go7007/go7007-v4l2.c4
-rw-r--r--drivers/staging/go7007/saa7134-go7007.c11
-rw-r--r--drivers/staging/go7007/wis-saa7113.c1
-rw-r--r--drivers/staging/go7007/wis-saa7115.c1
-rw-r--r--drivers/staging/go7007/wis-sony-tuner.c1
-rw-r--r--drivers/staging/go7007/wis-tw2804.c1
-rw-r--r--drivers/staging/go7007/wis-tw9903.c1
-rw-r--r--drivers/staging/hv/Kconfig8
-rw-r--r--drivers/staging/hv/Makefile11
-rw-r--r--drivers/staging/hv/TODO3
-rw-r--r--drivers/staging/hv/blkvsc.c (renamed from drivers/staging/hv/BlkVsc.c)4
-rw-r--r--drivers/staging/hv/blkvsc_drv.c45
-rw-r--r--drivers/staging/hv/channel.c (renamed from drivers/staging/hv/Channel.c)195
-rw-r--r--drivers/staging/hv/channel.h (renamed from drivers/staging/hv/Channel.h)2
-rw-r--r--drivers/staging/hv/channel_interface.c (renamed from drivers/staging/hv/ChannelInterface.c)2
-rw-r--r--drivers/staging/hv/channel_interface.h (renamed from drivers/staging/hv/ChannelInterface.h)2
-rw-r--r--drivers/staging/hv/channel_mgmt.c (renamed from drivers/staging/hv/ChannelMgmt.c)228
-rw-r--r--drivers/staging/hv/channel_mgmt.h (renamed from drivers/staging/hv/ChannelMgmt.h)6
-rw-r--r--drivers/staging/hv/connection.c (renamed from drivers/staging/hv/Connection.c)31
-rw-r--r--drivers/staging/hv/hv.c (renamed from drivers/staging/hv/Hv.c)28
-rw-r--r--drivers/staging/hv/hv.h (renamed from drivers/staging/hv/Hv.h)0
-rw-r--r--drivers/staging/hv/hv_utils.c295
-rw-r--r--drivers/staging/hv/logging.h7
-rw-r--r--drivers/staging/hv/netvsc.c (renamed from drivers/staging/hv/NetVsc.c)104
-rw-r--r--drivers/staging/hv/netvsc.h (renamed from drivers/staging/hv/NetVsc.h)7
-rw-r--r--drivers/staging/hv/netvsc_api.h (renamed from drivers/staging/hv/NetVscApi.h)9
-rw-r--r--drivers/staging/hv/netvsc_drv.c243
-rw-r--r--drivers/staging/hv/osd.c70
-rw-r--r--drivers/staging/hv/ring_buffer.c (renamed from drivers/staging/hv/RingBuffer.c)16
-rw-r--r--drivers/staging/hv/ring_buffer.h (renamed from drivers/staging/hv/RingBuffer.h)0
-rw-r--r--drivers/staging/hv/rndis.h2
-rw-r--r--drivers/staging/hv/rndis_filter.c (renamed from drivers/staging/hv/RndisFilter.c)56
-rw-r--r--drivers/staging/hv/rndis_filter.h (renamed from drivers/staging/hv/RndisFilter.h)2
-rw-r--r--drivers/staging/hv/storvsc.c (renamed from drivers/staging/hv/StorVsc.c)54
-rw-r--r--drivers/staging/hv/storvsc_api.h (renamed from drivers/staging/hv/StorVscApi.h)2
-rw-r--r--drivers/staging/hv/storvsc_drv.c54
-rw-r--r--drivers/staging/hv/utils.h119
-rw-r--r--drivers/staging/hv/version_info.h (renamed from drivers/staging/hv/VersionInfo.h)7
-rw-r--r--drivers/staging/hv/vmbus.c (renamed from drivers/staging/hv/Vmbus.c)33
-rw-r--r--drivers/staging/hv/vmbus.h2
-rw-r--r--drivers/staging/hv/vmbus_api.h (renamed from drivers/staging/hv/VmbusApi.h)18
-rw-r--r--drivers/staging/hv/vmbus_channel_interface.h (renamed from drivers/staging/hv/VmbusChannelInterface.h)0
-rw-r--r--drivers/staging/hv/vmbus_drv.c96
-rw-r--r--drivers/staging/hv/vmbus_packet_format.h (renamed from drivers/staging/hv/VmbusPacketFormat.h)1
-rw-r--r--drivers/staging/hv/vmbus_private.h (renamed from drivers/staging/hv/VmbusPrivate.h)12
-rw-r--r--drivers/staging/hv/vstorage.h2
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h265
-rw-r--r--drivers/staging/iio/Documentation/lis3l02dqbuffersimple.c237
-rw-r--r--drivers/staging/iio/Documentation/sysfs-class-iio294
-rw-r--r--drivers/staging/iio/Documentation/userspace.txt2
-rw-r--r--drivers/staging/iio/Kconfig2
-rw-r--r--drivers/staging/iio/Makefile2
-rw-r--r--drivers/staging/iio/accel/Kconfig25
-rw-r--r--drivers/staging/iio/accel/Makefile11
-rw-r--r--drivers/staging/iio/accel/accel.h12
-rw-r--r--drivers/staging/iio/accel/adis16209.h193
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c615
-rw-r--r--drivers/staging/iio/accel/adis16209_ring.c266
-rw-r--r--drivers/staging/iio/accel/adis16209_trigger.c124
-rw-r--r--drivers/staging/iio/accel/adis16220.h147
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c670
-rw-r--r--drivers/staging/iio/accel/adis16240.h218
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c599
-rw-r--r--drivers/staging/iio/accel/adis16240_ring.c254
-rw-r--r--drivers/staging/iio/accel/adis16240_trigger.c124
-rw-r--r--drivers/staging/iio/accel/inclinometer.h23
-rw-r--r--drivers/staging/iio/accel/kxsd9.c88
-rw-r--r--drivers/staging/iio/accel/lis3l02dq.h4
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c173
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c24
-rw-r--r--drivers/staging/iio/accel/sca3000.h2
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c178
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c35
-rw-r--r--drivers/staging/iio/adc/Kconfig11
-rw-r--r--drivers/staging/iio/adc/Makefile2
-rw-r--r--drivers/staging/iio/adc/adc.h15
-rw-r--r--drivers/staging/iio/adc/max1363.h122
-rw-r--r--drivers/staging/iio/adc/max1363_core.c1109
-rw-r--r--drivers/staging/iio/adc/max1363_ring.c82
-rw-r--r--drivers/staging/iio/chrdev.h2
-rw-r--r--drivers/staging/iio/gyro/Kconfig13
-rw-r--r--drivers/staging/iio/gyro/Makefile7
-rw-r--r--drivers/staging/iio/gyro/adis16260.h175
-rw-r--r--drivers/staging/iio/gyro/adis16260_core.c661
-rw-r--r--drivers/staging/iio/gyro/adis16260_ring.c256
-rw-r--r--drivers/staging/iio/gyro/adis16260_trigger.c124
-rw-r--r--drivers/staging/iio/gyro/gyro.h43
-rw-r--r--drivers/staging/iio/iio.h47
-rw-r--r--drivers/staging/iio/imu/Kconfig33
-rw-r--r--drivers/staging/iio/imu/Makefile14
-rw-r--r--drivers/staging/iio/imu/adis16300.h194
-rw-r--r--drivers/staging/iio/imu/adis16300_core.c768
-rw-r--r--drivers/staging/iio/imu/adis16300_ring.c233
-rw-r--r--drivers/staging/iio/imu/adis16300_trigger.c127
-rw-r--r--drivers/staging/iio/imu/adis16350.h193
-rw-r--r--drivers/staging/iio/imu/adis16350_core.c736
-rw-r--r--drivers/staging/iio/imu/adis16350_ring.c286
-rw-r--r--drivers/staging/iio/imu/adis16350_trigger.c127
-rw-r--r--drivers/staging/iio/imu/adis16400.h229
-rw-r--r--drivers/staging/iio/imu/adis16400_core.c800
-rw-r--r--drivers/staging/iio/imu/adis16400_ring.c245
-rw-r--r--drivers/staging/iio/imu/adis16400_trigger.c127
-rw-r--r--drivers/staging/iio/industrialio-core.c60
-rw-r--r--drivers/staging/iio/industrialio-ring.c75
-rw-r--r--drivers/staging/iio/industrialio-trigger.c20
-rw-r--r--drivers/staging/iio/light/tsl2563.c20
-rw-r--r--drivers/staging/iio/magnetometer/magnet.h31
-rw-r--r--drivers/staging/iio/ring_generic.h50
-rw-r--r--drivers/staging/iio/ring_sw.c50
-rw-r--r--drivers/staging/iio/sysfs.h15
-rw-r--r--drivers/staging/iio/trigger/iio-trig-gpio.c131
-rw-r--r--drivers/staging/iio/trigger/iio-trig-periodic-rtc.c4
-rw-r--r--drivers/staging/line6/control.h166
-rw-r--r--drivers/staging/line6/driver.c10
-rw-r--r--drivers/staging/line6/dumprequest.c3
-rw-r--r--drivers/staging/line6/pod.c6
-rw-r--r--drivers/staging/line6/variax.c5
-rw-r--r--drivers/staging/memrar/Kconfig15
-rw-r--r--drivers/staging/memrar/Makefile2
-rw-r--r--drivers/staging/memrar/TODO43
-rw-r--r--drivers/staging/memrar/memrar-abi89
-rw-r--r--drivers/staging/memrar/memrar.h155
-rw-r--r--drivers/staging/memrar/memrar_allocator.c432
-rw-r--r--drivers/staging/memrar/memrar_allocator.h149
-rw-r--r--drivers/staging/memrar/memrar_handler.c996
-rw-r--r--drivers/staging/mrst-touchscreen/Kconfig7
-rw-r--r--drivers/staging/mrst-touchscreen/Makefile3
-rw-r--r--drivers/staging/mrst-touchscreen/TODO2
-rw-r--r--drivers/staging/mrst-touchscreen/intel-mid-touch.c864
-rw-r--r--drivers/staging/msm/Kconfig134
-rw-r--r--drivers/staging/msm/Makefile93
-rw-r--r--drivers/staging/msm/TODO3
-rw-r--r--drivers/staging/msm/ebi2_l2f.c569
-rw-r--r--drivers/staging/msm/ebi2_lcd.c250
-rw-r--r--drivers/staging/msm/ebi2_tmd20.c1122
-rw-r--r--drivers/staging/msm/hdmi_sii9022.c248
-rw-r--r--drivers/staging/msm/lcdc.c239
-rw-r--r--drivers/staging/msm/lcdc_external.c54
-rw-r--r--drivers/staging/msm/lcdc_gordon.c446
-rw-r--r--drivers/staging/msm/lcdc_grapefruit.c60
-rw-r--r--drivers/staging/msm/lcdc_panel.c88
-rw-r--r--drivers/staging/msm/lcdc_prism.c64
-rw-r--r--drivers/staging/msm/lcdc_sharp_wvga_pt.c290
-rw-r--r--drivers/staging/msm/lcdc_st15.c237
-rw-r--r--drivers/staging/msm/lcdc_st1_wxga.c54
-rw-r--r--drivers/staging/msm/lcdc_toshiba_wvga_pt.c374
-rw-r--r--drivers/staging/msm/lcdc_wxga.c56
-rw-r--r--drivers/staging/msm/logo.c98
-rw-r--r--drivers/staging/msm/mddi.c375
-rw-r--r--drivers/staging/msm/mddi_ext.c320
-rw-r--r--drivers/staging/msm/mddi_ext_lcd.c91
-rw-r--r--drivers/staging/msm/mddi_prism.c114
-rw-r--r--drivers/staging/msm/mddi_sharp.c892
-rw-r--r--drivers/staging/msm/mddi_toshiba.c1741
-rw-r--r--drivers/staging/msm/mddi_toshiba.h52
-rw-r--r--drivers/staging/msm/mddi_toshiba_vga.c136
-rw-r--r--drivers/staging/msm/mddi_toshiba_wvga.c63
-rw-r--r--drivers/staging/msm/mddi_toshiba_wvga_pt.c64
-rw-r--r--drivers/staging/msm/mddihost.c377
-rw-r--r--drivers/staging/msm/mddihost.h225
-rw-r--r--drivers/staging/msm/mddihost_e.c63
-rw-r--r--drivers/staging/msm/mddihosti.c2239
-rw-r--r--drivers/staging/msm/mddihosti.h547
-rw-r--r--drivers/staging/msm/mdp.c1113
-rw-r--r--drivers/staging/msm/mdp.h695
-rw-r--r--drivers/staging/msm/mdp4.h352
-rw-r--r--drivers/staging/msm/mdp4_debugfs.c181
-rw-r--r--drivers/staging/msm/mdp4_overlay.c1259
-rw-r--r--drivers/staging/msm/mdp4_overlay_lcdc.c313
-rw-r--r--drivers/staging/msm/mdp4_overlay_mddi.c254
-rw-r--r--drivers/staging/msm/mdp4_util.c1686
-rw-r--r--drivers/staging/msm/mdp_cursor.c104
-rw-r--r--drivers/staging/msm/mdp_dma.c561
-rw-r--r--drivers/staging/msm/mdp_dma_lcdc.c379
-rw-r--r--drivers/staging/msm/mdp_dma_s.c139
-rw-r--r--drivers/staging/msm/mdp_dma_tv.c142
-rw-r--r--drivers/staging/msm/mdp_hw_init.c720
-rw-r--r--drivers/staging/msm/mdp_ppp.c1502
-rw-r--r--drivers/staging/msm/mdp_ppp_dq.c347
-rw-r--r--drivers/staging/msm/mdp_ppp_dq.h86
-rw-r--r--drivers/staging/msm/mdp_ppp_v20.c2486
-rw-r--r--drivers/staging/msm/mdp_ppp_v31.c828
-rw-r--r--drivers/staging/msm/mdp_vsync.c389
-rw-r--r--drivers/staging/msm/memory.c214
-rw-r--r--drivers/staging/msm/memory_ll.h61
-rw-r--r--drivers/staging/msm/msm_fb.c2354
-rw-r--r--drivers/staging/msm/msm_fb.h174
-rw-r--r--drivers/staging/msm/msm_fb_bl.c79
-rw-r--r--drivers/staging/msm/msm_fb_def.h201
-rw-r--r--drivers/staging/msm/msm_fb_panel.c136
-rw-r--r--drivers/staging/msm/msm_fb_panel.h145
-rw-r--r--drivers/staging/msm/msm_mdp.h245
-rw-r--r--drivers/staging/msm/staging-devices.c323
-rw-r--r--drivers/staging/msm/tv_ntsc.c163
-rw-r--r--drivers/staging/msm/tv_pal.c213
-rw-r--r--drivers/staging/msm/tvenc.c295
-rw-r--r--drivers/staging/msm/tvenc.h117
-rw-r--r--drivers/staging/netwave/Kconfig11
-rw-r--r--drivers/staging/netwave/Makefile1
-rw-r--r--drivers/staging/netwave/TODO7
-rw-r--r--drivers/staging/netwave/netwave_cs.c1364
-rw-r--r--drivers/staging/otus/80211core/cagg.c34
-rw-r--r--drivers/staging/otus/80211core/ccmd.c3
-rw-r--r--drivers/staging/otus/80211core/cfunc.c3
-rw-r--r--drivers/staging/otus/80211core/cic.c9
-rw-r--r--drivers/staging/otus/80211core/cinit.c3
-rw-r--r--drivers/staging/otus/80211core/cmm.c144
-rw-r--r--drivers/staging/otus/80211core/cmmap.c103
-rw-r--r--drivers/staging/otus/80211core/cmmsta.c123
-rw-r--r--drivers/staging/otus/80211core/coid.c3
-rw-r--r--drivers/staging/otus/80211core/cpsmgr.c3
-rw-r--r--drivers/staging/otus/80211core/ctxrx.c81
-rw-r--r--drivers/staging/otus/80211core/queue.c5
-rw-r--r--drivers/staging/otus/80211core/ratectrl.c3
-rw-r--r--drivers/staging/otus/hal/hpani.c33
-rw-r--r--drivers/staging/otus/hal/hpani.h7
-rw-r--r--drivers/staging/otus/hal/hpfw2.c2
-rw-r--r--drivers/staging/otus/hal/hpfwu.c2
-rw-r--r--drivers/staging/otus/hal/hpfwu_2k.c2
-rw-r--r--drivers/staging/otus/hal/hpfwu_BA.c2
-rw-r--r--drivers/staging/otus/hal/hpfwu_OTUS_RC.c2
-rw-r--r--drivers/staging/otus/hal/hpfwuinit.c2
-rw-r--r--drivers/staging/otus/hal/hpmain.c30
-rw-r--r--drivers/staging/otus/hal/hpreg.c1586
-rw-r--r--drivers/staging/otus/ioctl.c37
-rw-r--r--drivers/staging/otus/usbdrv.c6
-rw-r--r--drivers/staging/otus/wwrap.c1
-rw-r--r--drivers/staging/otus/zdusb.c6
-rw-r--r--drivers/staging/panel/panel.c5
-rw-r--r--drivers/staging/phison/Kconfig2
-rw-r--r--drivers/staging/phison/phison.c4
-rw-r--r--drivers/staging/poch/Kconfig6
-rw-r--r--drivers/staging/poch/Makefile1
-rw-r--r--drivers/staging/poch/README136
-rw-r--r--drivers/staging/poch/poch.c1443
-rw-r--r--drivers/staging/poch/poch.h35
-rw-r--r--drivers/staging/pohmelfs/config.c41
-rw-r--r--drivers/staging/pohmelfs/crypto.c38
-rw-r--r--drivers/staging/pohmelfs/dir.c14
-rw-r--r--drivers/staging/pohmelfs/inode.c74
-rw-r--r--drivers/staging/pohmelfs/net.c38
-rw-r--r--drivers/staging/pohmelfs/netfs.h17
-rw-r--r--drivers/staging/quatech_usb2/quatech_usb2.c12
-rw-r--r--drivers/staging/ramzswap/TODO5
-rw-r--r--drivers/staging/ramzswap/ramzswap_drv.c667
-rw-r--r--drivers/staging/ramzswap/ramzswap_drv.h51
-rw-r--r--drivers/staging/ramzswap/ramzswap_ioctl.h14
-rw-r--r--drivers/staging/rar_register/Kconfig28
-rw-r--r--drivers/staging/rar_register/rar_register.c610
-rw-r--r--drivers/staging/rar_register/rar_register.h62
-rw-r--r--drivers/staging/rt2860/chip/mac_pci.h41
-rw-r--r--drivers/staging/rt2860/chip/mac_usb.h55
-rw-r--r--drivers/staging/rt2860/chip/rtmp_mac.h52
-rw-r--r--drivers/staging/rt2860/chip/rtmp_phy.h338
-rw-r--r--drivers/staging/rt2860/chips/rt3070.c4
-rw-r--r--drivers/staging/rt2860/chips/rt3090.c4
-rw-r--r--drivers/staging/rt2860/chips/rt30xx.c9
-rw-r--r--drivers/staging/rt2860/common/cmm_aes.c2
-rw-r--r--drivers/staging/rt2860/common/cmm_data.c8
-rw-r--r--drivers/staging/rt2860/common/cmm_mac_pci.c2
-rw-r--r--drivers/staging/rt2860/common/cmm_mac_usb.c2
-rw-r--r--drivers/staging/rt2860/common/cmm_wpa.c18
-rw-r--r--drivers/staging/rt2860/common/rtmp_init.c15
-rw-r--r--drivers/staging/rt2860/common/spectrum.c7
-rw-r--r--drivers/staging/rt2860/mlme.h2
-rw-r--r--drivers/staging/rt2860/pci_main_dev.c37
-rw-r--r--drivers/staging/rt2860/rt_linux.c51
-rw-r--r--drivers/staging/rt2860/rt_linux.h14
-rw-r--r--drivers/staging/rt2860/rt_main_dev.c20
-rw-r--r--drivers/staging/rt2860/rt_pci_rbus.c24
-rw-r--r--drivers/staging/rt2860/rt_usb.c20
-rw-r--r--drivers/staging/rt2860/rtmp.h412
-rw-r--r--drivers/staging/rt2860/sta/assoc.c1
-rw-r--r--drivers/staging/rt2860/sta_ioctl.c17
-rw-r--r--drivers/staging/rt2860/usb_main_dev.c5
-rw-r--r--drivers/staging/rt2870/Kconfig2
-rw-r--r--drivers/staging/rt2870/common/rtusb_bulk.c42
-rw-r--r--drivers/staging/rt2870/common/rtusb_data.c8
-rw-r--r--drivers/staging/rt2870/common/rtusb_io.c39
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c6
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c3
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c3
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c3
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_module.c7
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c4
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c20
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c4
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c6
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c2076
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225z2.c129
-rw-r--r--drivers/staging/rtl8192e/Makefile1
-rw-r--r--drivers/staging/rtl8192e/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c6
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c3
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c3
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c3
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_module.c9
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c17
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c6
-rw-r--r--drivers/staging/rtl8192e/ieee80211/rtl819x_Qos.h168
-rw-r--r--drivers/staging/rtl8192e/ieee80211/rtl819x_TSProc.c38
-rw-r--r--drivers/staging/rtl8192e/r8190_rtl8256.c4
-rw-r--r--drivers/staging/rtl8192e/r8192E_core.c14
-rw-r--r--drivers/staging/rtl8192e/r8192_pm.c6
-rw-r--r--drivers/staging/rtl8192e/r8192_pm.h4
-rw-r--r--drivers/staging/rtl8192su/Kconfig1
-rw-r--r--drivers/staging/rtl8192su/Makefile2
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c6
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_ccmp.c3
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_tkip.c3
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_wep.c3
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_module.c9
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h24
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c17
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c6
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_Qos.h1
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_TSProc.c12
-rw-r--r--drivers/staging/rtl8192su/r8180_93cx6.c146
-rw-r--r--drivers/staging/rtl8192su/r8180_93cx6.h40
-rw-r--r--drivers/staging/rtl8192su/r8192SU_led.c2347
-rw-r--r--drivers/staging/rtl8192su/r8192SU_led.h93
-rw-r--r--drivers/staging/rtl8192su/r8192S_firmware.c503
-rw-r--r--drivers/staging/rtl8192su/r8192S_phy.c194
-rw-r--r--drivers/staging/rtl8192su/r8192U.h41
-rw-r--r--drivers/staging/rtl8192su/r8192U_core.c626
-rw-r--r--drivers/staging/rtl8192su/r819xU_cmdpkt.c663
-rw-r--r--drivers/staging/rtl8192su/r819xU_cmdpkt.h366
-rw-r--r--drivers/staging/rtl8192u/dot11d.h52
-rw-r--r--drivers/staging/rtl8192u/ieee80211.h548
-rw-r--r--drivers/staging/rtl8192u/ieee80211/api.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c9
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c20
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c7
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c9
-rw-r--r--drivers/staging/rtl8192u/ieee80211_crypt.h2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c12
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c1
-rw-r--r--drivers/staging/sep/sep_driver.c53
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c26
-rw-r--r--drivers/staging/sm7xx/smtcfb.c18
-rw-r--r--drivers/staging/sm7xx/smtcfb.h2
-rw-r--r--drivers/staging/strip/Kconfig22
-rw-r--r--drivers/staging/strip/Makefile1
-rw-r--r--drivers/staging/strip/TODO7
-rw-r--r--drivers/staging/strip/strip.c2823
-rw-r--r--drivers/staging/ti-st/Kconfig25
-rw-r--r--drivers/staging/ti-st/Makefile7
-rw-r--r--drivers/staging/ti-st/TODO19
-rw-r--r--drivers/staging/ti-st/bt_drv.c502
-rw-r--r--drivers/staging/ti-st/bt_drv.h61
-rw-r--r--drivers/staging/ti-st/fm.h13
-rw-r--r--drivers/staging/ti-st/st.h90
-rw-r--r--drivers/staging/ti-st/st_core.c1062
-rw-r--r--drivers/staging/ti-st/st_core.h98
-rw-r--r--drivers/staging/ti-st/st_kim.c754
-rw-r--r--drivers/staging/ti-st/st_kim.h150
-rw-r--r--drivers/staging/ti-st/st_ll.c147
-rw-r--r--drivers/staging/ti-st/st_ll.h62
-rw-r--r--drivers/staging/ti-st/sysfs-uim16
-rw-r--r--drivers/staging/tm6000/Kconfig3
-rw-r--r--drivers/staging/tm6000/tm6000-alsa.c25
-rw-r--r--drivers/staging/tm6000/tm6000-cards.c45
-rw-r--r--drivers/staging/tm6000/tm6000-core.c121
-rw-r--r--drivers/staging/tm6000/tm6000-dvb.c74
-rw-r--r--drivers/staging/tm6000/tm6000-video.c104
-rw-r--r--drivers/staging/tm6000/tm6000.h25
-rw-r--r--drivers/staging/udlfb/udlfb.c62
-rw-r--r--drivers/staging/usbip/stub_rx.c4
-rw-r--r--drivers/staging/usbip/usbip_common.h2
-rw-r--r--drivers/staging/usbip/vhci_hcd.c2
-rw-r--r--drivers/staging/usbip/vhci_tx.c2
-rw-r--r--drivers/staging/vme/boards/vme_vmivme7805.c1
-rw-r--r--drivers/staging/vme/bridges/vme_ca91cx42.c117
-rw-r--r--drivers/staging/vme/bridges/vme_tsi148.c398
-rw-r--r--drivers/staging/vme/bridges/vme_tsi148.h26
-rw-r--r--drivers/staging/vme/devices/vme_user.c18
-rw-r--r--drivers/staging/vme/vme.c4
-rw-r--r--drivers/staging/vt6655/80211hdr.h46
-rw-r--r--drivers/staging/vt6655/80211mgr.c88
-rw-r--r--drivers/staging/vt6655/80211mgr.h88
-rw-r--r--drivers/staging/vt6655/IEEE11h.c6
-rw-r--r--drivers/staging/vt6655/IEEE11h.h2
-rw-r--r--drivers/staging/vt6655/aes_ccmp.c10
-rw-r--r--drivers/staging/vt6655/baseband.c64
-rw-r--r--drivers/staging/vt6655/baseband.h52
-rw-r--r--drivers/staging/vt6655/bssdb.c214
-rw-r--r--drivers/staging/vt6655/bssdb.h148
-rw-r--r--drivers/staging/vt6655/card.c214
-rw-r--r--drivers/staging/vt6655/card.h158
-rw-r--r--drivers/staging/vt6655/datarate.c52
-rw-r--r--drivers/staging/vt6655/datarate.h36
-rw-r--r--drivers/staging/vt6655/desc.h12
-rw-r--r--drivers/staging/vt6655/device.h28
-rw-r--r--drivers/staging/vt6655/device_main.c93
-rw-r--r--drivers/staging/vt6655/dpc.c184
-rw-r--r--drivers/staging/vt6655/dpc.h6
-rw-r--r--drivers/staging/vt6655/hostap.c13
-rw-r--r--drivers/staging/vt6655/hostap.h4
-rw-r--r--drivers/staging/vt6655/ioctl.c32
-rw-r--r--drivers/staging/vt6655/ioctl.h10
-rw-r--r--drivers/staging/vt6655/iwctl.c31
-rw-r--r--drivers/staging/vt6655/key.c36
-rw-r--r--drivers/staging/vt6655/key.h30
-rw-r--r--drivers/staging/vt6655/mac.c8
-rw-r--r--drivers/staging/vt6655/mac.h52
-rw-r--r--drivers/staging/vt6655/mib.c6
-rw-r--r--drivers/staging/vt6655/mib.h2
-rw-r--r--drivers/staging/vt6655/michael.c24
-rw-r--r--drivers/staging/vt6655/michael.h8
-rw-r--r--drivers/staging/vt6655/power.c26
-rw-r--r--drivers/staging/vt6655/power.h28
-rw-r--r--drivers/staging/vt6655/rc4.c4
-rw-r--r--drivers/staging/vt6655/rc4.h2
-rw-r--r--drivers/staging/vt6655/rf.c22
-rw-r--r--drivers/staging/vt6655/rf.h14
-rw-r--r--drivers/staging/vt6655/rxtx.c510
-rw-r--r--drivers/staging/vt6655/rxtx.h84
-rw-r--r--drivers/staging/vt6655/srom.c54
-rw-r--r--drivers/staging/vt6655/srom.h2
-rw-r--r--drivers/staging/vt6655/tether.c2
-rw-r--r--drivers/staging/vt6655/tether.h31
-rw-r--r--drivers/staging/vt6655/tkip.c2
-rw-r--r--drivers/staging/vt6655/tkip.h2
-rw-r--r--drivers/staging/vt6655/ttype.h22
-rw-r--r--drivers/staging/vt6655/vntwifi.c142
-rw-r--r--drivers/staging/vt6655/vntwifi.h142
-rw-r--r--drivers/staging/vt6655/wcmd.c104
-rw-r--r--drivers/staging/vt6655/wcmd.h26
-rw-r--r--drivers/staging/vt6655/wctl.c4
-rw-r--r--drivers/staging/vt6655/wmgr.c672
-rw-r--r--drivers/staging/vt6655/wmgr.h96
-rw-r--r--drivers/staging/vt6655/wpa.c14
-rw-r--r--drivers/staging/vt6655/wpa.h14
-rw-r--r--drivers/staging/vt6655/wpa2.c16
-rw-r--r--drivers/staging/vt6655/wpa2.h14
-rw-r--r--drivers/staging/vt6655/wpactl.c19
-rw-r--r--drivers/staging/vt6655/wroute.c6
-rw-r--r--drivers/staging/vt6656/80211hdr.h83
-rw-r--r--drivers/staging/vt6656/80211mgr.c88
-rw-r--r--drivers/staging/vt6656/80211mgr.h136
-rw-r--r--drivers/staging/vt6656/aes_ccmp.c564
-rw-r--r--drivers/staging/vt6656/aes_ccmp.h2
-rw-r--r--drivers/staging/vt6656/baseband.c114
-rw-r--r--drivers/staging/vt6656/baseband.h59
-rw-r--r--drivers/staging/vt6656/bssdb.c402
-rw-r--r--drivers/staging/vt6656/bssdb.h252
-rw-r--r--drivers/staging/vt6656/card.c68
-rw-r--r--drivers/staging/vt6656/card.h62
-rw-r--r--drivers/staging/vt6656/channel.c12
-rw-r--r--drivers/staging/vt6656/channel.h10
-rw-r--r--drivers/staging/vt6656/control.c87
-rw-r--r--drivers/staging/vt6656/control.h31
-rw-r--r--drivers/staging/vt6656/datarate.c71
-rw-r--r--drivers/staging/vt6656/datarate.h39
-rw-r--r--drivers/staging/vt6656/desc.h12
-rw-r--r--drivers/staging/vt6656/device.h218
-rw-r--r--drivers/staging/vt6656/dpc.c305
-rw-r--r--drivers/staging/vt6656/dpc.h27
-rw-r--r--drivers/staging/vt6656/firmware.c6
-rw-r--r--drivers/staging/vt6656/firmware.h9
-rw-r--r--drivers/staging/vt6656/hostap.c27
-rw-r--r--drivers/staging/vt6656/hostap.h9
-rw-r--r--drivers/staging/vt6656/int.c251
-rw-r--r--drivers/staging/vt6656/int.h12
-rw-r--r--drivers/staging/vt6656/iocmd.h353
-rw-r--r--drivers/staging/vt6656/ioctl.c47
-rw-r--r--drivers/staging/vt6656/ioctl.h15
-rw-r--r--drivers/staging/vt6656/iowpa.h4
-rw-r--r--drivers/staging/vt6656/iwctl.c52
-rw-r--r--drivers/staging/vt6656/iwctl.h5
-rw-r--r--drivers/staging/vt6656/key.c87
-rw-r--r--drivers/staging/vt6656/key.h62
-rw-r--r--drivers/staging/vt6656/mac.c14
-rw-r--r--drivers/staging/vt6656/mac.h9
-rw-r--r--drivers/staging/vt6656/main_usb.c223
-rw-r--r--drivers/staging/vt6656/mib.c115
-rw-r--r--drivers/staging/vt6656/mib.h192
-rw-r--r--drivers/staging/vt6656/michael.c181
-rw-r--r--drivers/staging/vt6656/michael.h12
-rw-r--r--drivers/staging/vt6656/power.c58
-rw-r--r--drivers/staging/vt6656/power.h45
-rw-r--r--drivers/staging/vt6656/rc4.c84
-rw-r--r--drivers/staging/vt6656/rc4.h13
-rw-r--r--drivers/staging/vt6656/rf.c32
-rw-r--r--drivers/staging/vt6656/rf.h33
-rw-r--r--drivers/staging/vt6656/rndis.h3
-rw-r--r--drivers/staging/vt6656/rxtx.c868
-rw-r--r--drivers/staging/vt6656/rxtx.h46
-rw-r--r--drivers/staging/vt6656/srom.h2
-rw-r--r--drivers/staging/vt6656/tcrc.c23
-rw-r--r--drivers/staging/vt6656/tcrc.h11
-rw-r--r--drivers/staging/vt6656/tether.c45
-rw-r--r--drivers/staging/vt6656/tether.h33
-rw-r--r--drivers/staging/vt6656/tkip.c2
-rw-r--r--drivers/staging/vt6656/tkip.h7
-rw-r--r--drivers/staging/vt6656/tmacro.h4
-rw-r--r--drivers/staging/vt6656/ttype.h57
-rw-r--r--drivers/staging/vt6656/upc.h8
-rw-r--r--drivers/staging/vt6656/usbpipe.c138
-rw-r--r--drivers/staging/vt6656/usbpipe.h51
-rw-r--r--drivers/staging/vt6656/wcmd.c172
-rw-r--r--drivers/staging/vt6656/wcmd.h36
-rw-r--r--drivers/staging/vt6656/wctl.c23
-rw-r--r--drivers/staging/vt6656/wctl.h13
-rw-r--r--drivers/staging/vt6656/wmgr.c960
-rw-r--r--drivers/staging/vt6656/wmgr.h180
-rw-r--r--drivers/staging/vt6656/wpa.c14
-rw-r--r--drivers/staging/vt6656/wpa.h16
-rw-r--r--drivers/staging/vt6656/wpa2.c50
-rw-r--r--drivers/staging/vt6656/wpa2.h20
-rw-r--r--drivers/staging/vt6656/wpactl.c25
-rw-r--r--drivers/staging/vt6656/wpactl.h9
-rw-r--r--drivers/staging/wavelan/Kconfig38
-rw-r--r--drivers/staging/wavelan/Makefile2
-rw-r--r--drivers/staging/wavelan/TODO7
-rw-r--r--drivers/staging/wavelan/i82586.h413
-rw-r--r--drivers/staging/wavelan/wavelan.c4383
-rw-r--r--drivers/staging/wavelan/wavelan.h370
-rw-r--r--drivers/staging/wavelan/wavelan.p.h696
-rw-r--r--drivers/staging/wavelan/wavelan_cs.c4601
-rw-r--r--drivers/staging/wavelan/wavelan_cs.h386
-rw-r--r--drivers/staging/wavelan/wavelan_cs.p.h766
-rw-r--r--drivers/staging/winbond/TODO (renamed from drivers/staging/winbond/README)3
-rw-r--r--drivers/staging/winbond/core.h14
-rw-r--r--drivers/staging/winbond/localpara.h452
-rw-r--r--drivers/staging/winbond/mac_structures.h342
-rw-r--r--drivers/staging/winbond/mds.c324
-rw-r--r--drivers/staging/winbond/mds_f.h20
-rw-r--r--drivers/staging/winbond/mds_s.h173
-rw-r--r--drivers/staging/winbond/mlme_s.h288
-rw-r--r--drivers/staging/winbond/mlmetxrx.c62
-rw-r--r--drivers/staging/winbond/mlmetxrx_f.h6
-rw-r--r--drivers/staging/winbond/mto.c299
-rw-r--r--drivers/staging/winbond/mto.h179
-rw-r--r--drivers/staging/winbond/phy_calibration.c9
-rw-r--r--drivers/staging/winbond/phy_calibration.h177
-rw-r--r--drivers/staging/winbond/reg.c3985
-rw-r--r--drivers/staging/winbond/scan_s.h152
-rw-r--r--drivers/staging/winbond/sme_api.h211
-rw-r--r--drivers/staging/winbond/sysdef.h18
-rw-r--r--drivers/staging/winbond/wb35reg.c618
-rw-r--r--drivers/staging/winbond/wb35reg_f.h104
-rw-r--r--drivers/staging/winbond/wb35reg_s.h221
-rw-r--r--drivers/staging/winbond/wb35rx.c258
-rw-r--r--drivers/staging/winbond/wb35tx_f.h18
-rw-r--r--drivers/staging/winbond/wbhal_f.h137
-rw-r--r--drivers/staging/winbond/wbhal_s.h502
-rw-r--r--drivers/staging/winbond/wblinux_f.h19
-rw-r--r--drivers/staging/winbond/wbusb.c187
-rw-r--r--drivers/staging/winbond/wbusb_s.h23
-rw-r--r--drivers/staging/wlags49_h2/Kconfig12
-rw-r--r--drivers/staging/wlags49_h2/README.wlags492
-rw-r--r--drivers/staging/wlags49_h2/ap_h2.c80
-rw-r--r--drivers/staging/wlags49_h2/debug.h104
-rw-r--r--drivers/staging/wlags49_h2/dhf.c143
-rw-r--r--drivers/staging/wlags49_h2/dhf.h56
-rw-r--r--drivers/staging/wlags49_h2/dhfcfg.h118
-rw-r--r--drivers/staging/wlags49_h2/hcf.c15
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.c32
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_internal.h8
-rw-r--r--drivers/staging/wlags49_h2/wl_main.c3
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c39
-rw-r--r--drivers/staging/wlags49_h2/wl_priv.c11
-rw-r--r--drivers/staging/wlags49_h2/wl_profile.c957
-rw-r--r--drivers/staging/wlags49_h2/wl_sysfs.c3
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c9
-rw-r--r--drivers/staging/wlags49_h25/Kconfig12
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c154
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c5
-rw-r--r--drivers/staging/wlan-ng/p80211req.c5
-rw-r--r--drivers/staging/wlan-ng/p80211wext.c399
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c36
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c71
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c65
-rw-r--r--drivers/staging/xgifb/Kconfig11
-rw-r--r--drivers/staging/xgifb/Makefile4
-rw-r--r--drivers/staging/xgifb/TODO15
-rw-r--r--drivers/staging/xgifb/XGI.h10
-rw-r--r--drivers/staging/xgifb/XGI_accel.c596
-rw-r--r--drivers/staging/xgifb/XGI_accel.h511
-rw-r--r--drivers/staging/xgifb/XGI_main.h1023
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c3773
-rw-r--r--drivers/staging/xgifb/XGIfb.h215
-rw-r--r--drivers/staging/xgifb/osdef.h153
-rw-r--r--drivers/staging/xgifb/vb_def.h1017
-rw-r--r--drivers/staging/xgifb/vb_ext.c1370
-rw-r--r--drivers/staging/xgifb/vb_ext.h32
-rw-r--r--drivers/staging/xgifb/vb_init.c3444
-rw-r--r--drivers/staging/xgifb/vb_init.h7
-rw-r--r--drivers/staging/xgifb/vb_setmode.c10736
-rw-r--r--drivers/staging/xgifb/vb_setmode.h40
-rw-r--r--drivers/staging/xgifb/vb_struct.h534
-rw-r--r--drivers/staging/xgifb/vb_table.h4406
-rw-r--r--drivers/staging/xgifb/vb_util.c263
-rw-r--r--drivers/staging/xgifb/vb_util.h15
-rw-r--r--drivers/staging/xgifb/vgatypes.h325
-rw-r--r--drivers/telephony/ixj.c15
-rw-r--r--drivers/usb/atm/speedtch.c5
-rw-r--r--drivers/usb/class/cdc-acm.c6
-rw-r--r--drivers/usb/core/driver.c2
-rw-r--r--drivers/usb/gadget/Kconfig1
-rw-r--r--drivers/usb/gadget/f_audio.c4
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c11
-rw-r--r--drivers/usb/gadget/m66592-udc.h22
-rw-r--r--drivers/usb/gadget/printer.c2
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c4
-rw-r--r--drivers/usb/gadget/r8a66597-udc.h24
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c57
-rw-r--r--drivers/usb/gadget/storage_common.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c20
-rw-r--r--drivers/usb/host/ehci-mxc.c4
-rw-r--r--drivers/usb/host/ehci-ppc-of.c11
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c11
-rw-r--r--drivers/usb/host/fhci-hcd.c11
-rw-r--r--drivers/usb/host/isp1362.h2
-rw-r--r--drivers/usb/host/isp1760-if.c9
-rw-r--r--drivers/usb/host/ohci-ppc-of.c15
-rw-r--r--drivers/usb/host/r8a66597-hcd.c4
-rw-r--r--drivers/usb/host/r8a66597.h26
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-ring.c31
-rw-r--r--drivers/usb/host/xhci.c57
-rw-r--r--drivers/usb/host/xhci.h12
-rw-r--r--drivers/usb/mon/mon_bin.c23
-rw-r--r--drivers/usb/mon/mon_stat.c3
-rw-r--r--drivers/usb/serial/digi_acceleport.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/mos7840.c1
-rw-r--r--drivers/vhost/net.c16
-rw-r--r--drivers/vhost/vhost.c59
-rw-r--r--drivers/video/Kconfig7
-rw-r--r--drivers/video/arcfb.c8
-rw-r--r--drivers/video/aty/atyfb_base.c4
-rw-r--r--drivers/video/aty/mach64_accel.c9
-rw-r--r--drivers/video/backlight/88pm860x_bl.c2
-rw-r--r--drivers/video/backlight/Kconfig116
-rw-r--r--drivers/video/backlight/Makefile4
-rw-r--r--drivers/video/backlight/adp8860_bl.c836
-rw-r--r--drivers/video/backlight/adx_bl.c4
-rw-r--r--drivers/video/backlight/ep93xx_bl.c160
-rw-r--r--drivers/video/backlight/l4f00242t03.c11
-rw-r--r--drivers/video/backlight/max8925_bl.c1
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c47
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c190
-rw-r--r--drivers/video/backlight/s6e63m0.c920
-rw-r--r--drivers/video/backlight/s6e63m0_gamma.h266
-rw-r--r--drivers/video/backlight/tosa_bl.c2
-rw-r--r--drivers/video/bf54x-lq043fb.c7
-rw-r--r--drivers/video/bfin-lq035q1-fb.c252
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c7
-rw-r--r--drivers/video/bw2.c9
-rw-r--r--drivers/video/cg14.c9
-rw-r--r--drivers/video/cg3.c9
-rw-r--r--drivers/video/cg6.c9
-rw-r--r--drivers/video/da8xx-fb.c301
-rw-r--r--drivers/video/fb_defio.c14
-rw-r--r--drivers/video/fbmem.c5
-rw-r--r--drivers/video/ffb.c9
-rw-r--r--drivers/video/fsl-diu-fb.c10
-rw-r--r--drivers/video/hgafb.c10
-rw-r--r--drivers/video/hitfb.c8
-rw-r--r--drivers/video/intelfb/intelfb.h4
-rw-r--r--drivers/video/leo.c9
-rw-r--r--drivers/video/mb862xx/mb862xxfb.c10
-rw-r--r--drivers/video/mx3fb.c3
-rw-r--r--drivers/video/nuc900fb.c2
-rw-r--r--drivers/video/omap2/displays/Kconfig9
-rw-r--r--drivers/video/omap2/displays/Makefile1
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c819
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c78
-rw-r--r--drivers/video/omap2/displays/panel-taal.c143
-rw-r--r--drivers/video/omap2/dss/Kconfig6
-rw-r--r--drivers/video/omap2/dss/Makefile3
-rw-r--r--drivers/video/omap2/dss/core.c85
-rw-r--r--drivers/video/omap2/dss/display.c9
-rw-r--r--drivers/video/omap2/dss/dss.c24
-rw-r--r--drivers/video/omap2/dss/dss.h50
-rw-r--r--drivers/video/omap2/dss/manager.c21
-rw-r--r--drivers/video/omap2/dss/sdi.c26
-rw-r--r--drivers/video/omap2/dss/venc.c15
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c5
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c25
-rw-r--r--drivers/video/p9100.c9
-rw-r--r--drivers/video/platinumfb.c9
-rw-r--r--drivers/video/s3c2410fb.c10
-rw-r--r--drivers/video/s3fb.c101
-rw-r--r--drivers/video/sgivwfb.c10
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c6
-rw-r--r--drivers/video/sis/sis_main.c2
-rw-r--r--drivers/video/sunxvr1000.c9
-rw-r--r--drivers/video/tcx.c9
-rw-r--r--drivers/video/vfb.c4
-rw-r--r--drivers/video/vga16fb.c10
-rw-r--r--drivers/video/via/viafbdev.c11
-rw-r--r--drivers/video/w100fb.c10
-rw-r--r--drivers/video/xilinxfb.c23
-rw-r--r--drivers/virtio/virtio_balloon.c17
-rw-r--r--drivers/virtio/virtio_ring.c44
-rw-r--r--drivers/watchdog/Kconfig26
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/bfin_wdt.c19
-rw-r--r--drivers/watchdog/booke_wdt.c6
-rw-r--r--drivers/watchdog/cpwd.c9
-rw-r--r--drivers/watchdog/eurotechwdt.c1
-rw-r--r--drivers/watchdog/gef_wdt.c10
-rw-r--r--drivers/watchdog/iTCO_vendor_support.c11
-rw-r--r--drivers/watchdog/iTCO_wdt.c29
-rw-r--r--drivers/watchdog/imx2_wdt.c358
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c12
-rw-r--r--drivers/watchdog/pc87413_wdt.c9
-rw-r--r--drivers/watchdog/pnx833x_wdt.c11
-rw-r--r--drivers/watchdog/rdc321x_wdt.c53
-rw-r--r--drivers/watchdog/riowd.c7
-rw-r--r--drivers/watchdog/s3c2410_wdt.c9
-rw-r--r--drivers/watchdog/shwdt.c2
-rw-r--r--drivers/watchdog/twl4030_wdt.c2
-rw-r--r--drivers/watchdog/wdt.c2
-rw-r--r--drivers/watchdog/wdt977.c2
-rw-r--r--drivers/watchdog/wm8350_wdt.c2
-rw-r--r--drivers/xen/manage.c14
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c2
2047 files changed, 228359 insertions, 71564 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index f42a03029b7c..91874e048552 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PCI) += pci/
obj-$(CONFIG_PARISC) += parisc/
obj-$(CONFIG_RAPIDIO) += rapidio/
obj-y += video/
+obj-y += idle/
obj-$(CONFIG_ACPI) += acpi/
obj-$(CONFIG_SFI) += sfi/
# PnP must come after ACPI since it will eventually need to check if acpi
@@ -91,7 +92,6 @@ obj-$(CONFIG_EISA) += eisa/
obj-y += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
-obj-y += idle/
obj-$(CONFIG_MMC) += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-$(CONFIG_NEW_LEDS) += leds/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 93d2c7971df6..746411518802 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -360,4 +360,13 @@ config ACPI_SBS
To compile this driver as a module, choose M here:
the modules will be called sbs and sbshc.

+config ACPI_HED
+ tristate "Hardware Error Device"
+ help
+ This driver supports the Hardware Error Device (PNP0C33),
+ which is used to report some hardware errors notified via
+ SCI, mainly the corrected errors.
+
+source "drivers/acpi/apei/Kconfig"
+
endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index a8d8998dd5c5..6ee33169e1dc 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -19,7 +19,7 @@ obj-y += acpi.o \

# All the builtin files are in the "acpi." module_param namespace.
acpi-y += osl.o utils.o reboot.o
-acpi-y += hest.o
+acpi-y += atomicio.o

# sleep related files
acpi-y += wakeup.o
@@ -59,6 +59,7 @@ obj-$(CONFIG_ACPI_BATTERY) += battery.o
obj-$(CONFIG_ACPI_SBS) += sbshc.o
obj-$(CONFIG_ACPI_SBS) += sbs.o
obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
+obj-$(CONFIG_ACPI_HED) += hed.o

# processor has its own "processor." module_param namespace
processor-y := processor_driver.o processor_throttling.o
@@ -66,3 +67,5 @@ processor-y += processor_idle.o processor_thermal.o
processor-$(CONFIG_CPU_FREQ) += processor_perflib.o

obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
+
+obj-$(CONFIG_ACPI_APEI) += apei/
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 62122134693b..d269a8f3329c 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -43,6 +43,10 @@ static DEFINE_MUTEX(isolated_cpus_lock);
#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
static unsigned long power_saving_mwait_eax;
+
+static unsigned char tsc_detected_unstable;
+static unsigned char tsc_marked_unstable;
+
static void power_saving_mwait_init(void)
{
unsigned int eax, ebx, ecx, edx;
@@ -87,8 +91,8 @@ static void power_saving_mwait_init(void)
/*FALL THROUGH*/
default:
- /* TSC could halt in idle, so notify users */
- mark_tsc_unstable("TSC halts in idle");
+ /* TSC could halt in idle */
+ tsc_detected_unstable = 1;
}
#endif
}
@@ -168,16 +172,14 @@ static int power_saving_thread(void *data)
do_sleep = 0;
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we test
- * NEED_RESCHED:
- */
- smp_mb();
-
expire_time = jiffies + HZ * (100 - idle_pct) / 100;
while (!need_resched()) {
+ if (tsc_detected_unstable && !tsc_marked_unstable) {
+ /* TSC could halt in idle, so notify users */
+ mark_tsc_unstable("TSC halts in idle");
+ tsc_marked_unstable = 1;
+ }
local_irq_disable();
cpu = smp_processor_id();
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
@@ -200,8 +202,6 @@ static int power_saving_thread(void *data)
}
}

- current_thread_info()->status |= TS_POLLING;
-
/*
* current sched_rt has threshold for rt task running time.
* When a rt task uses 95% CPU time, the rt thread will be
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 7c7bbb4d402c..d5a5efc043bf 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -69,7 +69,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
acpi_status acpi_enable(void)
{
- acpi_status status = AE_OK;
+ acpi_status status;
ACPI_FUNCTION_TRACE(acpi_enable);
@@ -84,21 +84,30 @@ acpi_status acpi_enable(void)
if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"System is already in ACPI mode\n"));
- } else {
- /* Transition to ACPI mode */
+ return_ACPI_STATUS(AE_OK);
+ }
- status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not transition to ACPI mode"));
- return_ACPI_STATUS(status);
- }
+ /* Transition to ACPI mode */
- ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "Transition to ACPI mode successful\n"));
+ status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not transition to ACPI mode"));
+ return_ACPI_STATUS(status);
}
- return_ACPI_STATUS(status);
+ /* Sanity check that transition succeeded */
+
+ if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI) {
+ ACPI_ERROR((AE_INFO,
+ "Hardware did not enter ACPI mode"));
+ return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "Transition to ACPI mode successful\n"));
+
+ return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_enable)
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 679a112a7d26..b44274a0b62c 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -63,7 +63,6 @@ acpi_status acpi_hw_set_mode(u32 mode)
{
acpi_status status;
- u32 retry;
ACPI_FUNCTION_TRACE(hw_set_mode);
@@ -125,24 +124,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
return_ACPI_STATUS(status);
}
- /*
- * Some hardware takes a LONG time to switch modes. Give them 3 sec to
- * do so, but allow faster systems to proceed more quickly.
- */
- retry = 3000;
- while (retry) {
- if (acpi_hw_get_mode() == mode) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Mode %X successfully enabled\n",
- mode));
- return_ACPI_STATUS(AE_OK);
- }
- acpi_os_stall(1000);
- retry--;
- }
-
- ACPI_ERROR((AE_INFO, "Hardware did not change modes"));
- return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+ return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
new file mode 100644
index 000000000000..f8c668f27b5a
--- /dev/null
+++ b/drivers/acpi/apei/Kconfig
@@ -0,0 +1,30 @@
+config ACPI_APEI
+ bool "ACPI Platform Error Interface (APEI)"
+ depends on X86
+ help
+ APEI allows the platform to report errors (for example from
+ the chipset) to the operating system. This improves NMI
+ handling in particular. In addition it supports error
+ serialization and error injection.
+
+config ACPI_APEI_GHES
+ tristate "APEI Generic Hardware Error Source"
+ depends on ACPI_APEI && X86
+ select ACPI_HED
+ help
+ Generic Hardware Error Source provides a way to report
+ platform hardware errors (such as those from the chipset). It
+ works in so-called "Firmware First" mode, that is, hardware
+ errors are reported to the firmware first and then forwarded
+ to Linux by the firmware. This way, some non-standard hardware
+ error registers or non-standard hardware links can be checked
+ by the firmware to produce more valuable hardware error
+ information for Linux.
+
+config ACPI_APEI_EINJ
+ tristate "APEI Error INJection (EINJ)"
+ depends on ACPI_APEI && DEBUG_FS
+ help
+ EINJ provides a hardware error injection mechanism; it is
+ mainly used for debugging and testing the other parts of
+ APEI and some other RAS features.
diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
new file mode 100644
index 000000000000..b13b03a17789
--- /dev/null
+++ b/drivers/acpi/apei/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_ACPI_APEI) += apei.o
+obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
+obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
+
+apei-y := apei-base.o hest.o cper.o erst.o
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
new file mode 100644
index 000000000000..db3946e9c66b
--- /dev/null
+++ b/drivers/acpi/apei/apei-base.c
@@ -0,0 +1,593 @@
+/*
+ * apei-base.c - ACPI Platform Error Interface (APEI) supporting
+ * infrastructure
+ *
+ * APEI allows the platform to report errors (for example from the
+ * chipset) to the operating system. This improves NMI handling in
+ * particular. In addition it supports error serialization and error
+ * injection.
+ *
+ * For more information about APEI, please refer to ACPI Specification
+ * version 4.0, chapter 17.
+ *
+ * This file contains common functions used by more than one APEI
+ * table, including the interpreter framework for ERST and EINJ and
+ * resource management for APEI registers.
+ *
+ * Copyright (C) 2009, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/kref.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <acpi/atomicio.h>
+
+#include "apei-internal.h"
+
+#define APEI_PFX "APEI: "
+
+/*
+ * APEI ERST (Error Record Serialization Table) and EINJ (Error
+ * INJection) interpreter framework.
+ */
+
+#define APEI_EXEC_PRESERVE_REGISTER 0x1
+
+void apei_exec_ctx_init(struct apei_exec_context *ctx,
+ struct apei_exec_ins_type *ins_table,
+ u32 instructions,
+ struct acpi_whea_header *action_table,
+ u32 entries)
+{
+ ctx->ins_table = ins_table;
+ ctx->instructions = instructions;
+ ctx->action_table = action_table;
+ ctx->entries = entries;
+}
+EXPORT_SYMBOL_GPL(apei_exec_ctx_init);
+
+int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
+{
+ int rc;
+
+ rc = acpi_atomic_read(val, &entry->register_region);
+ if (rc)
+ return rc;
+ *val >>= entry->register_region.bit_offset;
+ *val &= entry->mask;
+
+ return 0;
+}
+
+int apei_exec_read_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val = 0;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ ctx->value = val;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_read_register);
+
+int apei_exec_read_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+
+ rc = apei_exec_read_register(ctx, entry);
+ if (rc)
+ return rc;
+ ctx->value = (ctx->value == entry->value);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_read_register_value);
+
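+/*
+ * Write a value to the register described by the table entry. If the
+ * PRESERVE_REGISTER flag is set, bits outside entry->mask are read
+ * back first and preserved.
+ */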
+int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
+{
+ int rc;
+
+ val &= entry->mask;
+ val <<= entry->register_region.bit_offset;
+ if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
+ u64 valr = 0;
+ rc = acpi_atomic_read(&valr, &entry->register_region);
+ if (rc)
+ return rc;
+ valr &= ~(entry->mask << entry->register_region.bit_offset);
+ val |= valr;
+ }
+ rc = acpi_atomic_write(val, &entry->register_region);
+
+ return rc;
+}
+
+int apei_exec_write_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_write_register(entry, ctx->value);
+}
+EXPORT_SYMBOL_GPL(apei_exec_write_register);
+
+int apei_exec_write_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+
+ ctx->value = entry->value;
+ rc = apei_exec_write_register(ctx, entry);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(apei_exec_write_register_value);
+
+int apei_exec_noop(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_noop);
+
+/*
+ * Interpret the specified action. Go through the whole action table
+ * and execute all instructions belonging to the action.
+ */
+int apei_exec_run(struct apei_exec_context *ctx, u8 action)
+{
+ int rc;
+ u32 i, ip;
+ struct acpi_whea_header *entry;
+ apei_exec_ins_func_t run;
+
+ ctx->ip = 0;
+
+ /*
+ * "ip" is the instruction pointer of the current instruction,
+ * "ctx->ip" specifies the next instruction to be executed; an
+ * instruction's "run" function may change "ctx->ip" to
+ * implement "goto" semantics.
+ */
+rewind:
+ ip = 0;
+ for (i = 0; i < ctx->entries; i++) {
+ entry = &ctx->action_table[i];
+ if (entry->action != action)
+ continue;
+ if (ip == ctx->ip) {
+ if (entry->instruction >= ctx->instructions ||
+ !ctx->ins_table[entry->instruction].run) {
+ pr_warning(FW_WARN APEI_PFX
+ "Invalid action table, unknown instruction type: %d\n",
+ entry->instruction);
+ return -EINVAL;
+ }
+ run = ctx->ins_table[entry->instruction].run;
+ rc = run(ctx, entry);
+ if (rc < 0)
+ return rc;
+ else if (rc != APEI_EXEC_SET_IP)
+ ctx->ip++;
+ }
+ ip++;
+ if (ctx->ip < ip)
+ goto rewind;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_run);
+
+typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data);
+
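+/*
+ * Walk every entry of the action table (regardless of action) and call
+ * func on it; *end is updated to the index of the last entry visited,
+ * so callers can unwind partially completed work on failure.
+ */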
+static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
+ apei_exec_entry_func_t func,
+ void *data,
+ int *end)
+{
+ u8 ins;
+ int i, rc;
+ struct acpi_whea_header *entry;
+ struct apei_exec_ins_type *ins_table = ctx->ins_table;
+
+ for (i = 0; i < ctx->entries; i++) {
+ entry = ctx->action_table + i;
+ ins = entry->instruction;
+ if (end)
+ *end = i;
+ if (ins >= ctx->instructions || !ins_table[ins].run) {
+ pr_warning(FW_WARN APEI_PFX
+ "Invalid action table, unknown instruction type: %d\n",
+ ins);
+ return -EINVAL;
+ }
+ rc = func(ctx, entry, data);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int pre_map_gar_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ u8 ins = entry->instruction;
+
+ if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
+ return acpi_pre_map_gar(&entry->register_region);
+
+ return 0;
+}
+
+/*
+ * Pre-map all GARs in the action table so that they can be accessed
+ * from the NMI handler.
+ */
+int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
+{
+ int rc, end;
+
+ rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
+ NULL, &end);
+ if (rc) {
+ struct apei_exec_context ctx_unmap;
+ memcpy(&ctx_unmap, ctx, sizeof(*ctx));
+ ctx_unmap.entries = end;
+ apei_exec_post_unmap_gars(&ctx_unmap);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);
+
+static int post_unmap_gar_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ u8 ins = entry->instruction;
+
+ if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
+ acpi_post_unmap_gar(&entry->register_region);
+
+ return 0;
+}
+
+/* Post-unmap all GARs in the action table. */
+int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
+{
+ return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
+ NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);
+
+/*
+ * Resource management for GARs in APEI
+ */
+struct apei_res {
+ struct list_head list;
+ unsigned long start;
+ unsigned long end;
+};
+
+/* Collect all resources requested, to avoid conflicts */
+struct apei_resources apei_resources_all = {
+ .iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
+ .ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
+};
+
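+/*
+ * Add the range [start, start + size) to res_list, merging it with any
+ * existing ranges that it overlaps so the list stays non-overlapping.
+ */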
+static int apei_res_add(struct list_head *res_list,
+ unsigned long start, unsigned long size)
+{
+ struct apei_res *res, *resn, *res_ins = NULL;
+ unsigned long end = start + size;
+
+ if (end <= start)
+ return 0;
+repeat:
+ list_for_each_entry_safe(res, resn, res_list, list) {
+ if (res->start > end || res->end < start)
+ continue;
+ else if (end <= res->end && start >= res->start) {
+ kfree(res_ins);
+ return 0;
+ }
+ list_del(&res->list);
+ res->start = start = min(res->start, start);
+ res->end = end = max(res->end, end);
+ kfree(res_ins);
+ res_ins = res;
+ goto repeat;
+ }
+
+ if (res_ins)
+ list_add(&res_ins->list, res_list);
+ else {
+ res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res_ins)
+ return -ENOMEM;
+ res_ins->start = start;
+ res_ins->end = end;
+ list_add(&res_ins->list, res_list);
+ }
+
+ return 0;
+}
+
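+/*
+ * Subtract every range in res_list2 from the ranges in res_list1,
+ * trimming or splitting res_list1 entries as necessary.
+ */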
+static int apei_res_sub(struct list_head *res_list1,
+ struct list_head *res_list2)
+{
+ struct apei_res *res1, *resn1, *res2, *res;
+ res1 = list_entry(res_list1->next, struct apei_res, list);
+ resn1 = list_entry(res1->list.next, struct apei_res, list);
+ while (&res1->list != res_list1) {
+ list_for_each_entry(res2, res_list2, list) {
+ if (res1->start >= res2->end ||
+ res1->end <= res2->start)
+ continue;
+ else if (res1->end <= res2->end &&
+ res1->start >= res2->start) {
+ list_del(&res1->list);
+ kfree(res1);
+ break;
+ } else if (res1->end > res2->end &&
+ res1->start < res2->start) {
+ res = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+ res->start = res2->end;
+ res->end = res1->end;
+ res1->end = res2->start;
+ list_add(&res->list, &res1->list);
+ resn1 = res;
+ } else {
+ if (res1->start < res2->start)
+ res1->end = res2->start;
+ else
+ res1->start = res2->end;
+ }
+ }
+ res1 = resn1;
+ resn1 = list_entry(resn1->list.next, struct apei_res, list);
+ }
+
+ return 0;
+}
+
+static void apei_res_clean(struct list_head *res_list)
+{
+ struct apei_res *res, *resn;
+
+ list_for_each_entry_safe(res, resn, res_list, list) {
+ list_del(&res->list);
+ kfree(res);
+ }
+}
+
+void apei_resources_fini(struct apei_resources *resources)
+{
+ apei_res_clean(&resources->iomem);
+ apei_res_clean(&resources->ioport);
+}
+EXPORT_SYMBOL_GPL(apei_resources_fini);
+
+static int apei_resources_merge(struct apei_resources *resources1,
+ struct apei_resources *resources2)
+{
+ int rc;
+ struct apei_res *res;
+
+ list_for_each_entry(res, &resources2->iomem, list) {
+ rc = apei_res_add(&resources1->iomem, res->start,
+ res->end - res->start);
+ if (rc)
+ return rc;
+ }
+ list_for_each_entry(res, &resources2->ioport, list) {
+ rc = apei_res_add(&resources1->ioport, res->start,
+ res->end - res->start);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * EINJ has two groups of GARs (EINJ table entries and trigger table
+ * entries), so common resources are subtracted from the trigger table
+ * resources before the second request.
+ */
+int apei_resources_sub(struct apei_resources *resources1,
+ struct apei_resources *resources2)
+{
+ int rc;
+
+ rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
+ if (rc)
+ return rc;
+ return apei_res_sub(&resources1->ioport, &resources2->ioport);
+}
+EXPORT_SYMBOL_GPL(apei_resources_sub);
+
+/*
+ * The IO memory/port resource management mechanism is used to check
+ * whether the memory/port areas used by GARs conflict with normal
+ * memory or with the IO memory/ports of devices.
+ */
+int apei_resources_request(struct apei_resources *resources,
+ const char *desc)
+{
+ struct apei_res *res, *res_bak;
+ struct resource *r;
+
+ apei_resources_sub(resources, &apei_resources_all);
+
+ list_for_each_entry(res, &resources->iomem, list) {
+ r = request_mem_region(res->start, res->end - res->start,
+ desc);
+ if (!r) {
+ pr_err(APEI_PFX
+ "Can not request iomem region <%016llx-%016llx> for GARs.\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
+ res_bak = res;
+ goto err_unmap_iomem;
+ }
+ }
+
+ list_for_each_entry(res, &resources->ioport, list) {
+ r = request_region(res->start, res->end - res->start, desc);
+ if (!r) {
+ pr_err(APEI_PFX
+ "Can not request ioport region <%016llx-%016llx> for GARs.\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
+ res_bak = res;
+ goto err_unmap_ioport;
+ }
+ }
+
+ apei_resources_merge(&apei_resources_all, resources);
+
+ return 0;
+err_unmap_ioport:
+ list_for_each_entry(res, &resources->ioport, list) {
+ if (res == res_bak)
+ break;
+ /* ioport regions were requested with request_region() */
+ release_region(res->start, res->end - res->start);
+ }
+ res_bak = NULL;
+err_unmap_iomem:
+ list_for_each_entry(res, &resources->iomem, list) {
+ if (res == res_bak)
+ break;
+ /* iomem regions were requested with request_mem_region() */
+ release_mem_region(res->start, res->end - res->start);
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(apei_resources_request);
+
+void apei_resources_release(struct apei_resources *resources)
+{
+ struct apei_res *res;
+
+ list_for_each_entry(res, &resources->iomem, list)
+ release_mem_region(res->start, res->end - res->start);
+ list_for_each_entry(res, &resources->ioport, list)
+ release_region(res->start, res->end - res->start);
+
+ apei_resources_sub(&apei_resources_all, resources);
+}
+EXPORT_SYMBOL_GPL(apei_resources_release);
+
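+/*
+ * Check that a GAR describes a usable register: a non-zero address, a
+ * supported bit width, and a system memory or system IO address space.
+ */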
+static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
+{
+ u32 width, space_id;
+
+ width = reg->bit_width;
+ space_id = reg->space_id;
+ /* Handle possible alignment issues */
+ memcpy(paddr, &reg->address, sizeof(*paddr));
+ if (!*paddr) {
+ pr_warning(FW_BUG APEI_PFX
+ "Invalid physical address in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
+ pr_warning(FW_BUG APEI_PFX
+ "Invalid bit width in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+ space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
+ pr_warning(FW_BUG APEI_PFX
+ "Invalid address space type in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int collect_res_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ struct apei_resources *resources = data;
+ struct acpi_generic_address *reg = &entry->register_region;
+ u8 ins = entry->instruction;
+ u64 paddr;
+ int rc;
+
+ if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
+ return 0;
+
+ rc = apei_check_gar(reg, &paddr);
+ if (rc)
+ return rc;
+
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return apei_res_add(&resources->iomem, paddr,
+ reg->bit_width / 8);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return apei_res_add(&resources->ioport, paddr,
+ reg->bit_width / 8);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * The same register may be used by multiple instructions in the GARs,
+ * so all resources are collected before being requested.
+ */
+int apei_exec_collect_resources(struct apei_exec_context *ctx,
+ struct apei_resources *resources)
+{
+ return apei_exec_for_each_entry(ctx, collect_res_callback,
+ resources, NULL);
+}
+EXPORT_SYMBOL_GPL(apei_exec_collect_resources);
+
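+/* Create (on first use) and return the shared "apei" debugfs directory */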
+struct dentry *apei_get_debugfs_dir(void)
+{
+ static struct dentry *dapei;
+
+ if (!dapei)
+ dapei = debugfs_create_dir("apei", NULL);
+
+ return dapei;
+}
+EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
new file mode 100644
index 000000000000..18df1e940276
--- /dev/null
+++ b/drivers/acpi/apei/apei-internal.h
@@ -0,0 +1,114 @@
+/*
+ * apei-internal.h - ACPI Platform Error Interface internal
+ * definitions.
+ */
+
+#ifndef APEI_INTERNAL_H
+#define APEI_INTERNAL_H
+
+#include <linux/cper.h>
+
+struct apei_exec_context;
+
+typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+
+#define APEI_EXEC_INS_ACCESS_REGISTER 0x0001
+
+struct apei_exec_ins_type {
+ u32 flags;
+ apei_exec_ins_func_t run;
+};
+
+struct apei_exec_context {
+ u32 ip;
+ u64 value;
+ u64 var1;
+ u64 var2;
+ u64 src_base;
+ u64 dst_base;
+ struct apei_exec_ins_type *ins_table;
+ u32 instructions;
+ struct acpi_whea_header *action_table;
+ u32 entries;
+};
+
+void apei_exec_ctx_init(struct apei_exec_context *ctx,
+ struct apei_exec_ins_type *ins_table,
+ u32 instructions,
+ struct acpi_whea_header *action_table,
+ u32 entries);
+
+static inline void apei_exec_ctx_set_input(struct apei_exec_context *ctx,
+ u64 input)
+{
+ ctx->value = input;
+}
+
+static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx)
+{
+ return ctx->value;
+}
+
+int apei_exec_run(struct apei_exec_context *ctx, u8 action);
+
+/* Common instruction implementation */
+
+/* IP has been set in instruction function */
+#define APEI_EXEC_SET_IP 1
+
+int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
+int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
+int apei_exec_read_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_read_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_write_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_write_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_noop(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_pre_map_gars(struct apei_exec_context *ctx);
+int apei_exec_post_unmap_gars(struct apei_exec_context *ctx);
+
+struct apei_resources {
+ struct list_head iomem;
+ struct list_head ioport;
+};
+
+static inline void apei_resources_init(struct apei_resources *resources)
+{
+ INIT_LIST_HEAD(&resources->iomem);
+ INIT_LIST_HEAD(&resources->ioport);
+}
+
+void apei_resources_fini(struct apei_resources *resources);
+int apei_resources_sub(struct apei_resources *resources1,
+ struct apei_resources *resources2);
+int apei_resources_request(struct apei_resources *resources,
+ const char *desc);
+void apei_resources_release(struct apei_resources *resources);
+int apei_exec_collect_resources(struct apei_exec_context *ctx,
+ struct apei_resources *resources);
+
+struct dentry;
+struct dentry *apei_get_debugfs_dir(void);
+
+#define apei_estatus_for_each_section(estatus, section) \
+ for (section = (struct acpi_hest_generic_data *)(estatus + 1); \
+ (void *)section - (void *)estatus < estatus->data_length; \
+ section = (void *)(section+1) + section->error_data_length)
+
+static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus)
+{
+ if (estatus->raw_data_length)
+ return estatus->raw_data_offset + \
+ estatus->raw_data_length;
+ else
+ return sizeof(*estatus) + estatus->data_length;
+}
+
+int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
+int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
+#endif
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
new file mode 100644
index 000000000000..f4cf2fc4c8c1
--- /dev/null
+++ b/drivers/acpi/apei/cper.c
@@ -0,0 +1,84 @@
+/*
+ * UEFI Common Platform Error Record (CPER) support
+ *
+ * Copyright (C) 2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * CPER is the format used to describe platform hardware errors by
+ * various APEI tables, such as ERST, BERT and HEST.
+ *
+ * For more information about CPER, please refer to Appendix N of UEFI
+ * Specification version 2.3.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/cper.h>
+#include <linux/acpi.h>
+
+/*
+ * CPER record IDs need to be unique even across reboots, because a
+ * record ID is used as an index into ERST storage, and CPER records
+ * from multiple boots may co-exist in ERST.
+ */
+u64 cper_next_record_id(void)
+{
+ static atomic64_t seq;
+
+ if (!atomic64_read(&seq))
+ atomic64_set(&seq, ((u64)get_seconds()) << 32);
+
+ return atomic64_inc_return(&seq);
+}
+EXPORT_SYMBOL_GPL(cper_next_record_id);
+
+int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
+{
+ if (estatus->data_length &&
+ estatus->data_length < sizeof(struct acpi_hest_generic_data))
+ return -EINVAL;
+ if (estatus->raw_data_length &&
+ estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_estatus_check_header);
+
+int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
+{
+ struct acpi_hest_generic_data *gdata;
+ unsigned int data_len, gedata_len;
+ int rc;
+
+ rc = apei_estatus_check_header(estatus);
+ if (rc)
+ return rc;
+ data_len = estatus->data_length;
+ gdata = (struct acpi_hest_generic_data *)(estatus + 1);
+ while (data_len > sizeof(*gdata)) {
+ gedata_len = gdata->error_data_length;
+ if (gedata_len > data_len - sizeof(*gdata))
+ return -EINVAL;
+ data_len -= gedata_len + sizeof(*gdata);
+ /* advance to the next generic error data entry */
+ gdata = (void *)(gdata + 1) + gedata_len;
+ }
+ if (data_len)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_estatus_check);
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
new file mode 100644
index 000000000000..465c885938ee
--- /dev/null
+++ b/drivers/acpi/apei/einj.c
@@ -0,0 +1,548 @@
+/*
+ * APEI Error INJection support
+ *
+ * EINJ provides a hardware error injection mechanism; this is useful
+ * for debugging and testing other APEI and RAS features.
+ *
+ * For more information about EINJ, please refer to ACPI Specification
+ * version 4.0, section 17.5.
+ *
+ * Copyright 2009-2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
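+/*
+ * Typical usage through debugfs (assuming debugfs is mounted at
+ * /sys/kernel/debug):
+ *
+ *   cat /sys/kernel/debug/apei/einj/available_error_type
+ *   echo 0x8 > /sys/kernel/debug/apei/einj/error_type
+ *   echo 1 > /sys/kernel/debug/apei/einj/error_inject
+ */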
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/nmi.h>
+#include <linux/delay.h>
+#include <acpi/acpi.h>
+
+#include "apei-internal.h"
+
+#define EINJ_PFX "EINJ: "
+
+#define SPIN_UNIT 100 /* 100ns */
+/* Firmware should respond within 1 millisecond */
+#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
+
+/*
+ * Some BIOSes allow parameters to be passed to the SET_ERROR_TYPE
+ * entries in the EINJ table through an unpublished extension. Use it
+ * with caution, as most BIOSes will ignore the parameter and make
+ * their own choice of address for error injection.
+ */
+struct einj_parameter {
+ u64 type;
+ u64 reserved1;
+ u64 reserved2;
+ u64 param1;
+ u64 param2;
+};
+
+#define EINJ_OP_BUSY 0x1
+#define EINJ_STATUS_SUCCESS 0x0
+#define EINJ_STATUS_FAIL 0x1
+#define EINJ_STATUS_INVAL 0x2
+
+#define EINJ_TAB_ENTRY(tab) \
+ ((struct acpi_whea_header *)((char *)(tab) + \
+ sizeof(struct acpi_table_einj)))
+
+static struct acpi_table_einj *einj_tab;
+
+static struct apei_resources einj_resources;
+
+static struct apei_exec_ins_type einj_ins_type[] = {
+ [ACPI_EINJ_READ_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register,
+ },
+ [ACPI_EINJ_READ_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register_value,
+ },
+ [ACPI_EINJ_WRITE_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register,
+ },
+ [ACPI_EINJ_WRITE_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register_value,
+ },
+ [ACPI_EINJ_NOOP] = {
+ .flags = 0,
+ .run = apei_exec_noop,
+ },
+};
+
+/*
+ * Prevent the EINJ interpreter from running concurrently, because the
+ * corresponding firmware implementation may not work properly when
+ * invoked concurrently.
+ */
+static DEFINE_MUTEX(einj_mutex);
+
+static struct einj_parameter *einj_param;
+
+static void einj_exec_ctx_init(struct apei_exec_context *ctx)
+{
+ apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type),
+ EINJ_TAB_ENTRY(einj_tab), einj_tab->entries);
+}
+
+static int __einj_get_available_error_type(u32 *type)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ einj_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ *type = apei_exec_ctx_get_output(&ctx);
+
+ return 0;
+}
+
+/* Get error injection capabilities of the platform */
+static int einj_get_available_error_type(u32 *type)
+{
+ int rc;
+
+ mutex_lock(&einj_mutex);
+ rc = __einj_get_available_error_type(type);
+ mutex_unlock(&einj_mutex);
+
+ return rc;
+}
+
+static int einj_timedout(u64 *t)
+{
+ if ((s64)*t < SPIN_UNIT) {
+ pr_warning(FW_WARN EINJ_PFX
+ "Firmware does not respond in time\n");
+ return 1;
+ }
+ *t -= SPIN_UNIT;
+ ndelay(SPIN_UNIT);
+ touch_nmi_watchdog();
+ return 0;
+}
+
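+/*
+ * Scan the EINJ action table for a SET_ERROR_TYPE action implemented as
+ * a WRITE_REGISTER to system memory; its register address, if any, is
+ * the physical address of the optional parameter structure above.
+ */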
+static u64 einj_get_parameter_address(void)
+{
+ int i;
+ u64 paddr = 0;
+ struct acpi_whea_header *entry;
+
+ entry = EINJ_TAB_ENTRY(einj_tab);
+ for (i = 0; i < einj_tab->entries; i++) {
+ if (entry->action == ACPI_EINJ_SET_ERROR_TYPE &&
+ entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
+ entry->register_region.space_id ==
+ ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ memcpy(&paddr, &entry->register_region.address,
+ sizeof(paddr));
+ entry++;
+ }
+
+ return paddr;
+}
+
+/* Sanity check the trigger error action table header */
+static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
+{
+ if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger))
+ return -EINVAL;
+ if (trigger_tab->table_size > PAGE_SIZE ||
+ trigger_tab->table_size <= trigger_tab->header_size)
+ return -EINVAL;
+ if (trigger_tab->entry_count !=
+ (trigger_tab->table_size - trigger_tab->header_size) /
+ sizeof(struct acpi_einj_entry))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Execute instructions in trigger error action table */
+static int __einj_error_trigger(u64 trigger_paddr)
+{
+ struct acpi_einj_trigger *trigger_tab = NULL;
+ struct apei_exec_context trigger_ctx;
+ struct apei_resources trigger_resources;
+ struct acpi_whea_header *trigger_entry;
+ struct resource *r;
+ u32 table_size;
+ int rc = -EIO;
+
+ r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
+ "APEI EINJ Trigger Table");
+ if (!r) {
+ pr_err(EINJ_PFX
+ "Can not request iomem region <%016llx-%016llx> for Trigger table.\n",
+ (unsigned long long)trigger_paddr,
+ (unsigned long long)trigger_paddr+sizeof(*trigger_tab));
+ goto out;
+ }
+ trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
+ if (!trigger_tab) {
+ pr_err(EINJ_PFX "Failed to map trigger table!\n");
+ goto out_rel_header;
+ }
+ rc = einj_check_trigger_header(trigger_tab);
+ if (rc) {
+ pr_warning(FW_BUG EINJ_PFX
+ "The trigger error action table is invalid\n");
+ goto out_rel_header;
+ }
+ rc = -EIO;
+ table_size = trigger_tab->table_size;
+ r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
+ table_size - sizeof(*trigger_tab),
+ "APEI EINJ Trigger Table");
+ if (!r) {
+ pr_err(EINJ_PFX
+"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n",
+ (unsigned long long)trigger_paddr+sizeof(*trigger_tab),
+ (unsigned long long)trigger_paddr + table_size);
+ goto out_rel_header;
+ }
+ iounmap(trigger_tab);
+ trigger_tab = ioremap_cache(trigger_paddr, table_size);
+ if (!trigger_tab) {
+ pr_err(EINJ_PFX "Failed to map trigger table!\n");
+ goto out_rel_entry;
+ }
+ trigger_entry = (struct acpi_whea_header *)
+ ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
+ apei_resources_init(&trigger_resources);
+ apei_exec_ctx_init(&trigger_ctx, einj_ins_type,
+ ARRAY_SIZE(einj_ins_type),
+ trigger_entry, trigger_tab->entry_count);
+ rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources);
+ if (rc)
+ goto out_fini;
+ rc = apei_resources_sub(&trigger_resources, &einj_resources);
+ if (rc)
+ goto out_fini;
+ rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
+ if (rc)
+ goto out_fini;
+ rc = apei_exec_pre_map_gars(&trigger_ctx);
+ if (rc)
+ goto out_release;
+
+ rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR);
+
+ apei_exec_post_unmap_gars(&trigger_ctx);
+out_release:
+ apei_resources_release(&trigger_resources);
+out_fini:
+ apei_resources_fini(&trigger_resources);
+out_rel_entry:
+ release_mem_region(trigger_paddr + sizeof(*trigger_tab),
+ table_size - sizeof(*trigger_tab));
+out_rel_header:
+ release_mem_region(trigger_paddr, sizeof(*trigger_tab));
+out:
+ if (trigger_tab)
+ iounmap(trigger_tab);
+
+ return rc;
+}
+
+static int __einj_error_inject(u32 type, u64 param1, u64 param2)
+{
+ struct apei_exec_context ctx;
+ u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
+ int rc;
+
+ einj_exec_ctx_init(&ctx);
+
+ rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, type);
+ rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ if (einj_param) {
+ writeq(param1, &einj_param->param1);
+ writeq(param2, &einj_param->param2);
+ }
+ rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!(val & EINJ_OP_BUSY))
+ break;
+ if (einj_timedout(&timeout))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (val != EINJ_STATUS_SUCCESS)
+ return -EBUSY;
+
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE);
+ if (rc)
+ return rc;
+ trigger_paddr = apei_exec_ctx_get_output(&ctx);
+ rc = __einj_error_trigger(trigger_paddr);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION);
+
+ return rc;
+}
+
+/* Inject the specified hardware error */
+static int einj_error_inject(u32 type, u64 param1, u64 param2)
+{
+ int rc;
+
+ mutex_lock(&einj_mutex);
+ rc = __einj_error_inject(type, param1, param2);
+ mutex_unlock(&einj_mutex);
+
+ return rc;
+}
+
+static u32 error_type;
+static u64 error_param1;
+static u64 error_param2;
+static struct dentry *einj_debug_dir;
+
+static int available_error_type_show(struct seq_file *m, void *v)
+{
+ int rc;
+ u32 available_error_type = 0;
+
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ return rc;
+ if (available_error_type & 0x0001)
+ seq_printf(m, "0x00000001\tProcessor Correctable\n");
+ if (available_error_type & 0x0002)
+ seq_printf(m, "0x00000002\tProcessor Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0004)
+ seq_printf(m, "0x00000004\tProcessor Uncorrectable fatal\n");
+ if (available_error_type & 0x0008)
+ seq_printf(m, "0x00000008\tMemory Correctable\n");
+ if (available_error_type & 0x0010)
+ seq_printf(m, "0x00000010\tMemory Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0020)
+ seq_printf(m, "0x00000020\tMemory Uncorrectable fatal\n");
+ if (available_error_type & 0x0040)
+ seq_printf(m, "0x00000040\tPCI Express Correctable\n");
+ if (available_error_type & 0x0080)
+ seq_printf(m, "0x00000080\tPCI Express Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0100)
+ seq_printf(m, "0x00000100\tPCI Express Uncorrectable fatal\n");
+ if (available_error_type & 0x0200)
+ seq_printf(m, "0x00000200\tPlatform Correctable\n");
+ if (available_error_type & 0x0400)
+ seq_printf(m, "0x00000400\tPlatform Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0800)
+ seq_printf(m, "0x00000800\tPlatform Uncorrectable fatal\n");
+
+ return 0;
+}
+
+static int available_error_type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, available_error_type_show, NULL);
+}
+
+static const struct file_operations available_error_type_fops = {
+ .open = available_error_type_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int error_type_get(void *data, u64 *val)
+{
+ *val = error_type;
+
+ return 0;
+}
+
+static int error_type_set(void *data, u64 val)
+{
+ int rc;
+ u32 available_error_type = 0;
+
+ /* Only one error type can be specified */
+ if (val & (val - 1))
+ return -EINVAL;
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ return rc;
+ if (!(val & available_error_type))
+ return -EINVAL;
+ error_type = val;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get,
+ error_type_set, "0x%llx\n");
+
+static int error_inject_set(void *data, u64 val)
+{
+ if (!error_type)
+ return -EINVAL;
+
+ return einj_error_inject(error_type, error_param1, error_param2);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
+ error_inject_set, "%llu\n");
+
+static int einj_check_table(struct acpi_table_einj *einj_tab)
+{
+ if (einj_tab->header_length != sizeof(struct acpi_table_einj))
+ return -EINVAL;
+ if (einj_tab->header.length < sizeof(struct acpi_table_einj))
+ return -EINVAL;
+ if (einj_tab->entries !=
+ (einj_tab->header.length - sizeof(struct acpi_table_einj)) /
+ sizeof(struct acpi_einj_entry))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init einj_init(void)
+{
+ int rc;
+ u64 param_paddr;
+ acpi_status status;
+ struct dentry *fentry;
+ struct apei_exec_context ctx;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ status = acpi_get_table(ACPI_SIG_EINJ, 0,
+ (struct acpi_table_header **)&einj_tab);
+ if (status == AE_NOT_FOUND) {
+ pr_info(EINJ_PFX "Table is not found!\n");
+ return -ENODEV;
+ } else if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+ pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
+ return -EINVAL;
+ }
+
+ rc = einj_check_table(einj_tab);
+ if (rc) {
+ pr_warning(FW_BUG EINJ_PFX "EINJ table is invalid\n");
+ return -EINVAL;
+ }
+
+ rc = -ENOMEM;
+ einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
+ if (!einj_debug_dir)
+ goto err_cleanup;
+ fentry = debugfs_create_file("available_error_type", S_IRUSR,
+ einj_debug_dir, NULL,
+ &available_error_type_fops);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR,
+ einj_debug_dir, NULL, &error_type_fops);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param1);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param2);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_file("error_inject", S_IWUSR,
+ einj_debug_dir, NULL, &error_inject_fops);
+ if (!fentry)
+ goto err_cleanup;
+
+ apei_resources_init(&einj_resources);
+ einj_exec_ctx_init(&ctx);
+ rc = apei_exec_collect_resources(&ctx, &einj_resources);
+ if (rc)
+ goto err_fini;
+ rc = apei_resources_request(&einj_resources, "APEI EINJ");
+ if (rc)
+ goto err_fini;
+ rc = apei_exec_pre_map_gars(&ctx);
+ if (rc)
+ goto err_release;
+ param_paddr = einj_get_parameter_address();
+ if (param_paddr) {
+ einj_param = ioremap(param_paddr, sizeof(*einj_param));
+ rc = -ENOMEM;
+ if (!einj_param)
+ goto err_unmap;
+ }
+
+ pr_info(EINJ_PFX "Error INJection is initialized.\n");
+
+ return 0;
+
+err_unmap:
+ apei_exec_post_unmap_gars(&ctx);
+err_release:
+ apei_resources_release(&einj_resources);
+err_fini:
+ apei_resources_fini(&einj_resources);
+err_cleanup:
+ debugfs_remove_recursive(einj_debug_dir);
+
+ return rc;
+}
+
+static void __exit einj_exit(void)
+{
+ struct apei_exec_context ctx;
+
+ if (einj_param)
+ iounmap(einj_param);
+ einj_exec_ctx_init(&ctx);
+ apei_exec_post_unmap_gars(&ctx);
+ apei_resources_release(&einj_resources);
+ apei_resources_fini(&einj_resources);
+ debugfs_remove_recursive(einj_debug_dir);
+}
+
+module_init(einj_init);
+module_exit(einj_exit);
+
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("APEI Error INJection support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
new file mode 100644
index 000000000000..2ebc39115507
--- /dev/null
+++ b/drivers/acpi/apei/erst.c
@@ -0,0 +1,855 @@
+/*
+ * APEI Error Record Serialization Table support
+ *
+ * ERST is a way provided by APEI to save and retrieve hardware error
+ * information to and from a persistent store.
+ *
+ * For more information about ERST, please refer to ACPI Specification
+ * version 4.0, section 17.4.
+ *
+ * Copyright 2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+#include <linux/uaccess.h>
+#include <linux/cper.h>
+#include <linux/nmi.h>
+#include <acpi/apei.h>
+
+#include "apei-internal.h"
+
+#define ERST_PFX "ERST: "
+
+/* ERST command status */
+#define ERST_STATUS_SUCCESS 0x0
+#define ERST_STATUS_NOT_ENOUGH_SPACE 0x1
+#define ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x2
+#define ERST_STATUS_FAILED 0x3
+#define ERST_STATUS_RECORD_STORE_EMPTY 0x4
+#define ERST_STATUS_RECORD_NOT_FOUND 0x5
+
+#define ERST_TAB_ENTRY(tab) \
+ ((struct acpi_whea_header *)((char *)(tab) + \
+ sizeof(struct acpi_table_erst)))
+
+#define SPIN_UNIT 100 /* 100ns */
+/* Firmware should respond within 1 millisecond */
+#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
+#define FIRMWARE_MAX_STALL 50 /* 50us */
+
+int erst_disable;
+EXPORT_SYMBOL_GPL(erst_disable);
+
+static struct acpi_table_erst *erst_tab;
+
+/* ERST Error Log Address Range attributes */
+#define ERST_RANGE_RESERVED 0x0001
+#define ERST_RANGE_NVRAM 0x0002
+#define ERST_RANGE_SLOW 0x0004
+
+/*
+ * ERST Error Log Address Range, used as a buffer for reading/writing
+ * error records.
+ */
+static struct erst_erange {
+ u64 base;
+ u64 size;
+ void __iomem *vaddr;
+ u32 attr;
+} erst_erange;
+
+/*
+ * Prevent the ERST interpreter from running concurrently, because the
+ * corresponding firmware implementation may not work properly when
+ * invoked concurrently.
+ *
+ * The lock is also used to provide exclusive access to the ERST Error
+ * Log Address Range.
+ */
+static DEFINE_SPINLOCK(erst_lock);
+
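+/* Translate an ERST command status code into a Linux errno value */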
+static inline int erst_errno(int command_status)
+{
+ switch (command_status) {
+ case ERST_STATUS_SUCCESS:
+ return 0;
+ case ERST_STATUS_HARDWARE_NOT_AVAILABLE:
+ return -ENODEV;
+ case ERST_STATUS_NOT_ENOUGH_SPACE:
+ return -ENOSPC;
+ case ERST_STATUS_RECORD_STORE_EMPTY:
+ case ERST_STATUS_RECORD_NOT_FOUND:
+ return -ENOENT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int erst_timedout(u64 *t, u64 spin_unit)
+{
+ if ((s64)*t < spin_unit) {
+ pr_warning(FW_WARN ERST_PFX
+ "Firmware does not respond in time\n");
+ return 1;
+ }
+ *t -= spin_unit;
+ ndelay(spin_unit);
+ touch_nmi_watchdog();
+ return 0;
+}
+
+static int erst_exec_load_var1(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->var1);
+}
+
+static int erst_exec_load_var2(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->var2);
+}
+
+static int erst_exec_store_var1(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_write_register(entry, ctx->var1);
+}
+
+static int erst_exec_add(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ ctx->var1 += ctx->var2;
+ return 0;
+}
+
+static int erst_exec_subtract(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ ctx->var1 -= ctx->var2;
+ return 0;
+}
+
+static int erst_exec_add_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ val += ctx->value;
+ rc = __apei_exec_write_register(entry, val);
+ return rc;
+}
+
+static int erst_exec_subtract_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ val -= ctx->value;
+ rc = __apei_exec_write_register(entry, val);
+ return rc;
+}
+
+static int erst_exec_stall(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ u64 stall_time;
+
+ if (ctx->value > FIRMWARE_MAX_STALL) {
+ if (!in_nmi())
+ pr_warning(FW_WARN ERST_PFX
+ "Too long stall time for stall instruction: %llx.\n",
+ ctx->value);
+ stall_time = FIRMWARE_MAX_STALL;
+ } else
+ stall_time = ctx->value;
+ udelay(stall_time);
+ return 0;
+}
+
+static int erst_exec_stall_while_true(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 stall_time;
+
+ if (ctx->var1 > FIRMWARE_MAX_STALL) {
+ if (!in_nmi())
+ pr_warning(FW_WARN ERST_PFX
+ "Too long stall time for stall while true instruction: %llx.\n",
+ ctx->var1);
+ stall_time = FIRMWARE_MAX_STALL;
+ } else
+ stall_time = ctx->var1;
+
+ for (;;) {
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ if (val != ctx->value)
+ break;
+ if (erst_timedout(&timeout, stall_time * NSEC_PER_USEC))
+ return -EIO;
+ }
+ return 0;
+}
+
+static int erst_exec_skip_next_instruction_if_true(
+ struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ if (val == ctx->value) {
+ ctx->ip += 2;
+ return APEI_EXEC_SET_IP;
+ }
+
+ return 0;
+}
+
+static int erst_exec_goto(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ ctx->ip = ctx->value;
+ return APEI_EXEC_SET_IP;
+}
+
+static int erst_exec_set_src_address_base(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->src_base);
+}
+
+static int erst_exec_set_dst_address_base(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->dst_base);
+}
+
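+/*
+ * MOVE_DATA: copy ctx->var2 bytes from src_base + offset to
+ * dst_base + offset, where offset is read from the entry's register.
+ */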
+static int erst_exec_move_data(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 offset;
+
+ rc = __apei_exec_read_register(entry, &offset);
+ if (rc)
+ return rc;
+ memmove((void *)ctx->dst_base + offset,
+ (void *)ctx->src_base + offset,
+ ctx->var2);
+
+ return 0;
+}
+
+static struct apei_exec_ins_type erst_ins_type[] = {
+ [ACPI_ERST_READ_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register,
+ },
+ [ACPI_ERST_READ_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register_value,
+ },
+ [ACPI_ERST_WRITE_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register,
+ },
+ [ACPI_ERST_WRITE_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register_value,
+ },
+ [ACPI_ERST_NOOP] = {
+ .flags = 0,
+ .run = apei_exec_noop,
+ },
+ [ACPI_ERST_LOAD_VAR1] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_load_var1,
+ },
+ [ACPI_ERST_LOAD_VAR2] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_load_var2,
+ },
+ [ACPI_ERST_STORE_VAR1] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_store_var1,
+ },
+ [ACPI_ERST_ADD] = {
+ .flags = 0,
+ .run = erst_exec_add,
+ },
+ [ACPI_ERST_SUBTRACT] = {
+ .flags = 0,
+ .run = erst_exec_subtract,
+ },
+ [ACPI_ERST_ADD_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_add_value,
+ },
+ [ACPI_ERST_SUBTRACT_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_subtract_value,
+ },
+ [ACPI_ERST_STALL] = {
+ .flags = 0,
+ .run = erst_exec_stall,
+ },
+ [ACPI_ERST_STALL_WHILE_TRUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_stall_while_true,
+ },
+ [ACPI_ERST_SKIP_NEXT_IF_TRUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_skip_next_instruction_if_true,
+ },
+ [ACPI_ERST_GOTO] = {
+ .flags = 0,
+ .run = erst_exec_goto,
+ },
+ [ACPI_ERST_SET_SRC_ADDRESS_BASE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_set_src_address_base,
+ },
+ [ACPI_ERST_SET_DST_ADDRESS_BASE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_set_dst_address_base,
+ },
+ [ACPI_ERST_MOVE_DATA] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_move_data,
+ },
+};
+
+static inline void erst_exec_ctx_init(struct apei_exec_context *ctx)
+{
+ apei_exec_ctx_init(ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type),
+ ERST_TAB_ENTRY(erst_tab), erst_tab->entries);
+}
+
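+/*
+ * Query the firmware for the base, size and attributes of the Error
+ * Log Address Range.
+ */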
+static int erst_get_erange(struct erst_erange *range)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE);
+ if (rc)
+ return rc;
+ range->base = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_LENGTH);
+ if (rc)
+ return rc;
+ range->size = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_ATTRIBUTES);
+ if (rc)
+ return rc;
+ range->attr = apei_exec_ctx_get_output(&ctx);
+
+ return 0;
+}
+
+static ssize_t __erst_get_record_count(void)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT);
+ if (rc)
+ return rc;
+ return apei_exec_ctx_get_output(&ctx);
+}
+
+ssize_t erst_get_record_count(void)
+{
+ ssize_t count;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ count = __erst_get_record_count();
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(erst_get_record_count);
+
+static int __erst_get_next_record_id(u64 *record_id)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_ID);
+ if (rc)
+ return rc;
+ *record_id = apei_exec_ctx_get_output(&ctx);
+
+ return 0;
+}
+
+/*
+ * Get the record ID of an existing error record on the persistent
+ * storage. If there is no error record on the persistent storage, the
+ * returned record_id is APEI_ERST_INVALID_RECORD_ID.
+ */
+int erst_get_next_record_id(u64 *record_id)
+{
+ int rc;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ rc = __erst_get_next_record_id(record_id);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(erst_get_next_record_id);
+
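+/*
+ * Ask the firmware to commit the record currently staged in the Error
+ * Log Address Range to persistent storage, polling the busy flag until
+ * the operation completes or times out.
+ */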
+static int __erst_write_to_storage(u64 offset)
+{
+ struct apei_exec_context ctx;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 val;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, offset);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!val)
+ break;
+ if (erst_timedout(&timeout, SPIN_UNIT))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ if (rc)
+ return rc;
+
+ return erst_errno(val);
+}
+
+static int __erst_read_from_storage(u64 record_id, u64 offset)
+{
+ struct apei_exec_context ctx;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 val;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, offset);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, record_id);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!val)
+ break;
+ if (erst_timedout(&timeout, SPIN_UNIT))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ if (rc)
+ return rc;
+
+ return erst_errno(val);
+}
+
+static int __erst_clear_from_storage(u64 record_id)
+{
+ struct apei_exec_context ctx;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 val;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, record_id);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!val)
+ break;
+ if (erst_timedout(&timeout, SPIN_UNIT))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ if (rc)
+ return rc;
+
+ return erst_errno(val);
+}
+
+/* NVRAM ERST Error Log Address Range is not supported yet */
+static void pr_unimpl_nvram(void)
+{
+ if (printk_ratelimit())
+ pr_warning(ERST_PFX
+ "NVRAM ERST Log Address Range is not implemented yet\n");
+}
+
+static int __erst_write_to_nvram(const struct cper_record_header *record)
+{
+ /* Do not print a message here; printk is not safe in NMI context */
+ return -ENOSYS;
+}
+
+static int __erst_read_to_erange_from_nvram(u64 record_id, u64 *offset)
+{
+ pr_unimpl_nvram();
+ return -ENOSYS;
+}
+
+static int __erst_clear_from_nvram(u64 record_id)
+{
+ pr_unimpl_nvram();
+ return -ENOSYS;
+}
+
+int erst_write(const struct cper_record_header *record)
+{
+ int rc;
+ unsigned long flags;
+ struct cper_record_header *rcd_erange;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ if (memcmp(record->signature, CPER_SIG_RECORD, CPER_SIG_SIZE))
+ return -EINVAL;
+
+ if (erst_erange.attr & ERST_RANGE_NVRAM) {
+ if (!spin_trylock_irqsave(&erst_lock, flags))
+ return -EBUSY;
+ rc = __erst_write_to_nvram(record);
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return rc;
+ }
+
+ if (record->record_length > erst_erange.size)
+ return -EINVAL;
+
+ if (!spin_trylock_irqsave(&erst_lock, flags))
+ return -EBUSY;
+ memcpy(erst_erange.vaddr, record, record->record_length);
+ rcd_erange = erst_erange.vaddr;
+ /* signature for serialization system */
+ memcpy(&rcd_erange->persistence_information, "ER", 2);
+
+ rc = __erst_write_to_storage(0);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(erst_write);
+
+static int __erst_read_to_erange(u64 record_id, u64 *offset)
+{
+ int rc;
+
+ if (erst_erange.attr & ERST_RANGE_NVRAM)
+ return __erst_read_to_erange_from_nvram(
+ record_id, offset);
+
+ rc = __erst_read_from_storage(record_id, 0);
+ if (rc)
+ return rc;
+ *offset = 0;
+
+ return 0;
+}
+
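+/*
+ * Read the record with the given ID into the Error Log Address Range
+ * and copy it into the caller's buffer if it fits; the record length
+ * is returned either way.
+ */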
+static ssize_t __erst_read(u64 record_id, struct cper_record_header *record,
+ size_t buflen)
+{
+ int rc;
+ u64 offset, len = 0;
+ struct cper_record_header *rcd_tmp;
+
+ rc = __erst_read_to_erange(record_id, &offset);
+ if (rc)
+ return rc;
+ rcd_tmp = erst_erange.vaddr + offset;
+ len = rcd_tmp->record_length;
+ if (len <= buflen)
+ memcpy(record, rcd_tmp, len);
+
+ return len;
+}
+
+/*
+ * If the return value > buflen, the buffer is not big enough;
+ * if the return value < 0, something went wrong;
+ * otherwise everything is OK and the return value is the record length.
+ */
+ssize_t erst_read(u64 record_id, struct cper_record_header *record,
+ size_t buflen)
+{
+ ssize_t len;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ len = __erst_read(record_id, record, buflen);
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return len;
+}
+EXPORT_SYMBOL_GPL(erst_read);
+
+/*
+ * If the return value > buflen, the buffer is not big enough;
+ * if the return value == 0, there are no more records to read;
+ * if the return value < 0, something went wrong;
+ * otherwise everything is OK and the return value is the record length.
+ */
+ssize_t erst_read_next(struct cper_record_header *record, size_t buflen)
+{
+ int rc;
+ ssize_t len;
+ unsigned long flags;
+ u64 record_id;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ rc = __erst_get_next_record_id(&record_id);
+ if (rc) {
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return rc;
+ }
+ /* no more records */
+ if (record_id == APEI_ERST_INVALID_RECORD_ID) {
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return 0;
+ }
+
+ len = __erst_read(record_id, record, buflen);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(erst_read_next);
+
+int erst_clear(u64 record_id)
+{
+ int rc;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ if (erst_erange.attr & ERST_RANGE_NVRAM)
+ rc = __erst_clear_from_nvram(record_id);
+ else
+ rc = __erst_clear_from_storage(record_id);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(erst_clear);
+
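+/* Kernel command line parameter "erst_disable" turns ERST support off */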
+static int __init setup_erst_disable(char *str)
+{
+ erst_disable = 1;
+ return 0;
+}
+
+__setup("erst_disable", setup_erst_disable);
+
+static int erst_check_table(struct acpi_table_erst *erst_tab)
+{
+ if (erst_tab->header_length != sizeof(struct acpi_table_erst))
+ return -EINVAL;
+ if (erst_tab->header.length < sizeof(struct acpi_table_erst))
+ return -EINVAL;
+ if (erst_tab->entries !=
+ (erst_tab->header.length - sizeof(struct acpi_table_erst)) /
+ sizeof(struct acpi_erst_entry))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init erst_init(void)
+{
+ int rc = 0;
+ acpi_status status;
+ struct apei_exec_context ctx;
+ struct apei_resources erst_resources;
+ struct resource *r;
+
+ if (acpi_disabled)
+ goto err;
+
+ if (erst_disable) {
+ pr_info(ERST_PFX
+ "Error Record Serialization Table (ERST) support is disabled.\n");
+ goto err;
+ }
+
+ status = acpi_get_table(ACPI_SIG_ERST, 0,
+ (struct acpi_table_header **)&erst_tab);
+ if (status == AE_NOT_FOUND) {
+ pr_err(ERST_PFX "Table is not found!\n");
+ goto err;
+ } else if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+ pr_err(ERST_PFX "Failed to get table, %s\n", msg);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = erst_check_table(erst_tab);
+ if (rc) {
+ pr_err(FW_BUG ERST_PFX "ERST table is invalid\n");
+ goto err;
+ }
+
+ apei_resources_init(&erst_resources);
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_collect_resources(&ctx, &erst_resources);
+ if (rc)
+ goto err_fini;
+ rc = apei_resources_request(&erst_resources, "APEI ERST");
+ if (rc)
+ goto err_fini;
+ rc = apei_exec_pre_map_gars(&ctx);
+ if (rc)
+ goto err_release;
+ rc = erst_get_erange(&erst_erange);
+ if (rc) {
+ if (rc == -ENODEV)
+ pr_info(ERST_PFX
+ "The corresponding hardware device or firmware implementation "
+ "is not available.\n");
+ else
+ pr_err(ERST_PFX
+ "Failed to get Error Log Address Range.\n");
+ goto err_unmap_reg;
+ }
+
+ r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST");
+ if (!r) {
+ pr_err(ERST_PFX
+ "Can not request iomem region <0x%16llx-0x%16llx> for ERST.\n",
+ (unsigned long long)erst_erange.base,
+ (unsigned long long)erst_erange.base + erst_erange.size);
+ rc = -EIO;
+ goto err_unmap_reg;
+ }
+ rc = -ENOMEM;
+ erst_erange.vaddr = ioremap_cache(erst_erange.base,
+ erst_erange.size);
+ if (!erst_erange.vaddr)
+ goto err_release_erange;
+
+ pr_info(ERST_PFX
+ "Error Record Serialization Table (ERST) support is initialized.\n");
+
+ return 0;
+
+err_release_erange:
+ release_mem_region(erst_erange.base, erst_erange.size);
+err_unmap_reg:
+ apei_exec_post_unmap_gars(&ctx);
+err_release:
+ apei_resources_release(&erst_resources);
+err_fini:
+ apei_resources_fini(&erst_resources);
+err:
+ erst_disable = 1;
+ return rc;
+}
+
+device_initcall(erst_init);
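The erst_read()/erst_read_next() return-value convention above implies a simple retry loop in callers: grow the buffer when the returned length exceeds buflen, stop on zero, bail out on a negative value. Below is a minimal userspace sketch of such a caller, with erst_read_next() replaced by a stub and the record layout reduced to a length field; the stub and its behaviour are assumptions for illustration only, not part of this patch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Reduced stand-in for struct cper_record_header; only the length matters. */
struct cper_record_header {
	unsigned int record_length;
};

/*
 * Hypothetical stub modelling erst_read_next(): returns the record length,
 * 0 when no records remain, a negative value on error, and copies the
 * record out only when it fits into buflen.
 */
static long erst_read_next_stub(struct cper_record_header *record, size_t buflen)
{
	static int remaining = 2;
	size_t len = 64;		/* pretend every record is 64 bytes */

	if (!remaining)
		return 0;
	if (len > buflen)
		return len;		/* caller must retry with a bigger buffer */
	memset(record, 0, len);
	record->record_length = len;
	remaining--;
	return len;
}

int main(void)
{
	size_t buflen = 16;
	void *buf = malloc(buflen);
	long len;

	while (buf) {
		len = erst_read_next_stub(buf, buflen);
		if (len <= 0)			/* error or no more records */
			break;
		if ((size_t)len > buflen) {	/* buffer too small: grow and retry */
			buflen = len;
			buf = realloc(buf, buflen);
			continue;
		}
		printf("got a record of %ld bytes\n", len);
	}
	free(buf);
	return 0;
}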
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
new file mode 100644
index 000000000000..fd0cc016a099
--- /dev/null
+++ b/drivers/acpi/apei/ghes.c
@@ -0,0 +1,427 @@
+/*
+ * APEI Generic Hardware Error Source support
+ *
+ * Generic Hardware Error Source provides a way to report platform
+ * hardware errors (such as those from the chipset). It works in the
+ * so-called "Firmware First" mode: hardware errors are reported to
+ * firmware first and then forwarded to Linux by the firmware. This way,
+ * firmware can check non-standard hardware error registers or
+ * non-standard hardware links to produce more hardware error
+ * information for Linux.
+ *
+ * For more information about Generic Hardware Error Source, please
+ * refer to ACPI Specification version 4.0, section 17.3.2.6
+ *
+ * For now, only the SCI notification type and memory errors are
+ * supported. More notification types and hardware error types will be
+ * added later.
+ *
+ * Copyright 2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/cper.h>
+#include <linux/kdebug.h>
+#include <acpi/apei.h>
+#include <acpi/atomicio.h>
+#include <acpi/hed.h>
+#include <asm/mce.h>
+
+#include "apei-internal.h"
+
+#define GHES_PFX "GHES: "
+
+#define GHES_ESTATUS_MAX_SIZE 65536
+
+/*
+ * One struct ghes is created for each generic hardware error
+ * source.
+ *
+ * It provides the context for the APEI hardware error timer/IRQ/SCI/NMI
+ * handlers. The handler for one generic hardware error source is only
+ * triggered after the previous one has finished, so the handler can use
+ * struct ghes without locking.
+ *
+ * estatus: memory buffer for error status block, allocated during
+ * HEST parsing.
+ */
+#define GHES_TO_CLEAR 0x0001
+
+struct ghes {
+ struct acpi_hest_generic *generic;
+ struct acpi_hest_generic_status *estatus;
+ struct list_head list;
+ u64 buffer_paddr;
+ unsigned long flags;
+};
+
+/*
+ * Error source lists, one list for each notification method. The
+ * members of the lists are struct ghes.
+ *
+ * List members are only added during HEST parsing and deleted during
+ * module_exit, that is, single-threaded, so no lock is needed for
+ * that.
+ *
+ * But mutual exclusion is needed between member addition/deletion and
+ * the timer/IRQ/SCI/NMI handlers, which may traverse the list. RCU is
+ * used for that.
+ */
+static LIST_HEAD(ghes_sci);
+
+static struct ghes *ghes_new(struct acpi_hest_generic *generic)
+{
+ struct ghes *ghes;
+ unsigned int error_block_length;
+ int rc;
+
+ ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
+ if (!ghes)
+ return ERR_PTR(-ENOMEM);
+ ghes->generic = generic;
+ INIT_LIST_HEAD(&ghes->list);
+ rc = acpi_pre_map_gar(&generic->error_status_address);
+ if (rc)
+ goto err_free;
+ error_block_length = generic->error_block_length;
+ if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
+ pr_warning(FW_WARN GHES_PFX
+ "Error status block length is too long: %u for "
+ "generic hardware error source: %d.\n",
+ error_block_length, generic->header.source_id);
+ error_block_length = GHES_ESTATUS_MAX_SIZE;
+ }
+ ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
+ if (!ghes->estatus) {
+ rc = -ENOMEM;
+ goto err_unmap;
+ }
+
+ return ghes;
+
+err_unmap:
+ acpi_post_unmap_gar(&generic->error_status_address);
+err_free:
+ kfree(ghes);
+ return ERR_PTR(rc);
+}
+
+static void ghes_fini(struct ghes *ghes)
+{
+ kfree(ghes->estatus);
+ acpi_post_unmap_gar(&ghes->generic->error_status_address);
+}
+
+enum {
+ GHES_SER_NO = 0x0,
+ GHES_SER_CORRECTED = 0x1,
+ GHES_SER_RECOVERABLE = 0x2,
+ GHES_SER_PANIC = 0x3,
+};
+
+static inline int ghes_severity(int severity)
+{
+ switch (severity) {
+ case CPER_SER_INFORMATIONAL:
+ return GHES_SER_NO;
+ case CPER_SER_CORRECTED:
+ return GHES_SER_CORRECTED;
+ case CPER_SER_RECOVERABLE:
+ return GHES_SER_RECOVERABLE;
+ case CPER_SER_FATAL:
+ return GHES_SER_PANIC;
+ default:
+ /* Unknown, go panic */
+ return GHES_SER_PANIC;
+ }
+}
+
+/* The SCI handler runs in a work queue, so ioremap can be used here */
+static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
+ int from_phys)
+{
+ void *vaddr;
+
+ vaddr = ioremap_cache(paddr, len);
+ if (!vaddr)
+ return -ENOMEM;
+ if (from_phys)
+ memcpy(buffer, vaddr, len);
+ else
+ memcpy(vaddr, buffer, len);
+ iounmap(vaddr);
+
+ return 0;
+}
+
+static int ghes_read_estatus(struct ghes *ghes, int silent)
+{
+ struct acpi_hest_generic *g = ghes->generic;
+ u64 buf_paddr;
+ u32 len;
+ int rc;
+
+ rc = acpi_atomic_read(&buf_paddr, &g->error_status_address);
+ if (rc) {
+ if (!silent && printk_ratelimit())
+ pr_warning(FW_WARN GHES_PFX
+"Failed to read error status block address for hardware error source: %d.\n",
+ g->header.source_id);
+ return -EIO;
+ }
+ if (!buf_paddr)
+ return -ENOENT;
+
+ rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
+ sizeof(*ghes->estatus), 1);
+ if (rc)
+ return rc;
+ if (!ghes->estatus->block_status)
+ return -ENOENT;
+
+ ghes->buffer_paddr = buf_paddr;
+ ghes->flags |= GHES_TO_CLEAR;
+
+ rc = -EIO;
+ len = apei_estatus_len(ghes->estatus);
+ if (len < sizeof(*ghes->estatus))
+ goto err_read_block;
+ if (len > ghes->generic->error_block_length)
+ goto err_read_block;
+ if (apei_estatus_check_header(ghes->estatus))
+ goto err_read_block;
+ rc = ghes_copy_tofrom_phys(ghes->estatus + 1,
+ buf_paddr + sizeof(*ghes->estatus),
+ len - sizeof(*ghes->estatus), 1);
+ if (rc)
+ return rc;
+ if (apei_estatus_check(ghes->estatus))
+ goto err_read_block;
+ rc = 0;
+
+err_read_block:
+ if (rc && !silent)
+ pr_warning(FW_WARN GHES_PFX
+ "Failed to read error status block!\n");
+ return rc;
+}
+
+static void ghes_clear_estatus(struct ghes *ghes)
+{
+ ghes->estatus->block_status = 0;
+ if (!(ghes->flags & GHES_TO_CLEAR))
+ return;
+ ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
+ sizeof(ghes->estatus->block_status), 0);
+ ghes->flags &= ~GHES_TO_CLEAR;
+}
+
+static void ghes_do_proc(struct ghes *ghes)
+{
+ int ser, processed = 0;
+ struct acpi_hest_generic_data *gdata;
+
+ ser = ghes_severity(ghes->estatus->error_severity);
+ apei_estatus_for_each_section(ghes->estatus, gdata) {
+#ifdef CONFIG_X86_MCE
+ if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
+ CPER_SEC_PLATFORM_MEM)) {
+ apei_mce_report_mem_error(
+ ser == GHES_SER_CORRECTED,
+ (struct cper_sec_mem_err *)(gdata+1));
+ processed = 1;
+ }
+#endif
+ }
+
+ if (!processed && printk_ratelimit())
+ pr_warning(GHES_PFX
+ "Unknown error record from generic hardware error source: %d\n",
+ ghes->generic->header.source_id);
+}
+
+static int ghes_proc(struct ghes *ghes)
+{
+ int rc;
+
+ rc = ghes_read_estatus(ghes, 0);
+ if (rc)
+ goto out;
+ ghes_do_proc(ghes);
+
+out:
+ ghes_clear_estatus(ghes);
+ return 0;
+}
+
+static int ghes_notify_sci(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ struct ghes *ghes;
+ int ret = NOTIFY_DONE;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ghes, &ghes_sci, list) {
+ if (!ghes_proc(ghes))
+ ret = NOTIFY_OK;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static struct notifier_block ghes_notifier_sci = {
+ .notifier_call = ghes_notify_sci,
+};
+
+static int hest_ghes_parse(struct acpi_hest_header *hest_hdr, void *data)
+{
+ struct acpi_hest_generic *generic;
+ struct ghes *ghes = NULL;
+ int rc = 0;
+
+ if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
+ return 0;
+
+ generic = (struct acpi_hest_generic *)hest_hdr;
+ if (!generic->enabled)
+ return 0;
+
+ if (generic->error_block_length <
+ sizeof(struct acpi_hest_generic_status)) {
+ pr_warning(FW_BUG GHES_PFX
+"Invalid error block length: %u for generic hardware error source: %d\n",
+ generic->error_block_length,
+ generic->header.source_id);
+ goto err;
+ }
+ if (generic->records_to_preallocate == 0) {
+ pr_warning(FW_BUG GHES_PFX
+"Invalid records to preallocate: %u for generic hardware error source: %d\n",
+ generic->records_to_preallocate,
+ generic->header.source_id);
+ goto err;
+ }
+ ghes = ghes_new(generic);
+ if (IS_ERR(ghes)) {
+ rc = PTR_ERR(ghes);
+ ghes = NULL;
+ goto err;
+ }
+ switch (generic->notify.type) {
+ case ACPI_HEST_NOTIFY_POLLED:
+ pr_warning(GHES_PFX
+"Generic hardware error source: %d notified via POLL is not supported!\n",
+ generic->header.source_id);
+ break;
+ case ACPI_HEST_NOTIFY_EXTERNAL:
+ case ACPI_HEST_NOTIFY_LOCAL:
+ pr_warning(GHES_PFX
+"Generic hardware error source: %d notified via IRQ is not supported!\n",
+ generic->header.source_id);
+ break;
+ case ACPI_HEST_NOTIFY_SCI:
+ if (list_empty(&ghes_sci))
+ register_acpi_hed_notifier(&ghes_notifier_sci);
+ list_add_rcu(&ghes->list, &ghes_sci);
+ break;
+ case ACPI_HEST_NOTIFY_NMI:
+ pr_warning(GHES_PFX
+"Generic hardware error source: %d notified via NMI is not supported!\n",
+ generic->header.source_id);
+ break;
+ default:
+ pr_warning(FW_WARN GHES_PFX
+ "Unknown notification type: %u for generic hardware error source: %d\n",
+ generic->notify.type, generic->header.source_id);
+ break;
+ }
+
+ return 0;
+err:
+ if (ghes)
+ ghes_fini(ghes);
+ return rc;
+}
+
+static void ghes_cleanup(void)
+{
+ struct ghes *ghes, *nghes;
+
+ if (!list_empty(&ghes_sci))
+ unregister_acpi_hed_notifier(&ghes_notifier_sci);
+
+ synchronize_rcu();
+
+ list_for_each_entry_safe(ghes, nghes, &ghes_sci, list) {
+ list_del(&ghes->list);
+ ghes_fini(ghes);
+ kfree(ghes);
+ }
+}
+
+static int __init ghes_init(void)
+{
+ int rc;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ if (hest_disable) {
+ pr_info(GHES_PFX "HEST is not enabled!\n");
+ return -EINVAL;
+ }
+
+ rc = apei_hest_parse(hest_ghes_parse, NULL);
+ if (rc) {
+ pr_err(GHES_PFX
+ "Error during parsing HEST generic hardware error sources.\n");
+ goto err_cleanup;
+ }
+
+ if (list_empty(&ghes_sci)) {
+ pr_info(GHES_PFX
+ "No functional generic hardware error sources.\n");
+ rc = -ENODEV;
+ goto err_cleanup;
+ }
+
+ pr_info(GHES_PFX
+ "Generic Hardware Error Source support is initialized.\n");
+
+ return 0;
+err_cleanup:
+ ghes_cleanup();
+ return rc;
+}
+
+static void __exit ghes_exit(void)
+{
+ ghes_cleanup();
+}
+
+module_init(ghes_init);
+module_exit(ghes_exit);
+
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
+MODULE_LICENSE("GPL");
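ghes_read_estatus() above reads the status block in two steps: first only the fixed header, then, after checking that the length derived from the header is at least the header size, no larger than the firmware-declared error_block_length, and passes the header checks, the rest of the block. The following is a small sketch of that length validation, using a simplified status structure and an assumed length helper; both are illustrations, not the kernel definitions.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for struct acpi_hest_generic_status. */
struct generic_status {
	unsigned int block_status;
	unsigned int raw_data_offset;
	unsigned int raw_data_length;
	unsigned int data_length;
	unsigned int error_severity;
};

/* Assumed model of apei_estatus_len(): header plus data plus raw data. */
static size_t estatus_len(const struct generic_status *s)
{
	return sizeof(*s) + s->data_length + s->raw_data_length;
}

/* Mirrors the sanity checks done before the full block is copied. */
static int estatus_len_ok(const struct generic_status *s, size_t error_block_length)
{
	size_t len = estatus_len(s);

	if (len < sizeof(*s))
		return 0;	/* shorter than its own header */
	if (len > error_block_length)
		return 0;	/* larger than firmware said the block could be */
	return 1;
}

int main(void)
{
	struct generic_status s = { .data_length = 128 };

	printf("fits in 4096: %d\n", estatus_len_ok(&s, 4096));	/* 1 */
	printf("fits in   64: %d\n", estatus_len_ok(&s, 64));	/* 0 */
	return 0;
}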
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
new file mode 100644
index 000000000000..e7f40d362cb3
--- /dev/null
+++ b/drivers/acpi/apei/hest.c
@@ -0,0 +1,173 @@
+/*
+ * APEI Hardware Error Source Table support
+ *
+ * HEST describes error sources in detail and communicates operational
+ * parameters (e.g. severity levels, masking bits, and threshold
+ * values) to Linux as necessary. It also allows the BIOS to report
+ * non-standard error sources to Linux (for example, chipset-specific
+ * error registers).
+ *
+ * For more information about HEST, please refer to ACPI Specification
+ * version 4.0, section 17.3.2.
+ *
+ * Copyright 2009 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/kdebug.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <acpi/apei.h>
+
+#include "apei-internal.h"
+
+#define HEST_PFX "HEST: "
+
+int hest_disable;
+EXPORT_SYMBOL_GPL(hest_disable);
+
+/* HEST table parsing */
+
+static struct acpi_table_hest *hest_tab;
+
+static int hest_void_parse(struct acpi_hest_header *hest_hdr, void *data)
+{
+ return 0;
+}
+
+static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = {
+ [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */
+ [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1,
+ [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi),
+ [ACPI_HEST_TYPE_AER_ROOT_PORT] = sizeof(struct acpi_hest_aer_root),
+ [ACPI_HEST_TYPE_AER_ENDPOINT] = sizeof(struct acpi_hest_aer),
+ [ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge),
+ [ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic),
+};
+
+static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
+{
+ u16 hest_type = hest_hdr->type;
+ int len;
+
+ if (hest_type >= ACPI_HEST_TYPE_RESERVED)
+ return 0;
+
+ len = hest_esrc_len_tab[hest_type];
+
+ if (hest_type == ACPI_HEST_TYPE_IA32_CORRECTED_CHECK) {
+ struct acpi_hest_ia_corrected *cmc;
+ cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
+ len = sizeof(*cmc) + cmc->num_hardware_banks *
+ sizeof(struct acpi_hest_ia_error_bank);
+ } else if (hest_type == ACPI_HEST_TYPE_IA32_CHECK) {
+ struct acpi_hest_ia_machine_check *mc;
+ mc = (struct acpi_hest_ia_machine_check *)hest_hdr;
+ len = sizeof(*mc) + mc->num_hardware_banks *
+ sizeof(struct acpi_hest_ia_error_bank);
+ }
+ BUG_ON(len == -1);
+
+ return len;
+}
+
+int apei_hest_parse(apei_hest_func_t func, void *data)
+{
+ struct acpi_hest_header *hest_hdr;
+ int i, rc, len;
+
+ if (hest_disable)
+ return -EINVAL;
+
+ hest_hdr = (struct acpi_hest_header *)(hest_tab + 1);
+ for (i = 0; i < hest_tab->error_source_count; i++) {
+ len = hest_esrc_len(hest_hdr);
+ if (!len) {
+ pr_warning(FW_WARN HEST_PFX
+ "Unknown or unused hardware error source "
+ "type: %d for hardware error source: %d.\n",
+ hest_hdr->type, hest_hdr->source_id);
+ return -EINVAL;
+ }
+ if ((void *)hest_hdr + len >
+ (void *)hest_tab + hest_tab->header.length) {
+ pr_warning(FW_BUG HEST_PFX
+ "Table contents overflow for hardware error source: %d.\n",
+ hest_hdr->source_id);
+ return -EINVAL;
+ }
+
+ rc = func(hest_hdr, data);
+ if (rc)
+ return rc;
+
+ hest_hdr = (void *)hest_hdr + len;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_hest_parse);
+
+static int __init setup_hest_disable(char *str)
+{
+ hest_disable = 1;
+ return 0;
+}
+
+__setup("hest_disable", setup_hest_disable);
+
+static int __init hest_init(void)
+{
+ acpi_status status;
+ int rc = -ENODEV;
+
+ if (acpi_disabled)
+ goto err;
+
+ if (hest_disable) {
+ pr_info(HEST_PFX "HEST table parsing is disabled.\n");
+ goto err;
+ }
+
+ status = acpi_get_table(ACPI_SIG_HEST, 0,
+ (struct acpi_table_header **)&hest_tab);
+ if (status == AE_NOT_FOUND) {
+ pr_info(HEST_PFX "Table is not found!\n");
+ goto err;
+ } else if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+ pr_err(HEST_PFX "Failed to get table, %s\n", msg);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = apei_hest_parse(hest_void_parse, NULL);
+ if (rc)
+ goto err;
+
+ pr_info(HEST_PFX "HEST table parsing is initialized.\n");
+
+ return 0;
+err:
+ hest_disable = 1;
+ return rc;
+}
+
+subsys_initcall(hest_init);
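apei_hest_parse() above walks a packed sequence of variable-length error source structures: the size of each entry is derived from its type (with the IA32 machine-check types adding per-bank data), the entry is bounds-checked against the end of the table, and the cursor advances by that size. Here is a minimal sketch of the same walking pattern over an in-memory buffer, with a simplified header that carries its own length; that explicit length field is an assumption made purely for illustration, since real HEST entries derive their size from the type.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for struct acpi_hest_header. */
struct hdr {
	unsigned short type;
	unsigned short source_id;
	unsigned int len;	/* illustration only; real entries derive this from type */
};

static int walk(const void *table, size_t table_len, unsigned int count)
{
	const char *p = table;
	const char *end = (const char *)table + table_len;
	unsigned int i;

	for (i = 0; i < count; i++) {
		const struct hdr *h = (const struct hdr *)p;

		if (p + sizeof(*h) > end || p + h->len > end)
			return -1;	/* entry overflows the table */
		printf("source %u: type %u, %u bytes\n",
		       h->source_id, h->type, h->len);
		p += h->len;		/* advance to the next entry */
	}
	return 0;
}

int main(void)
{
	struct hdr entries[2] = {
		{ .type = 9, .source_id = 0, .len = sizeof(struct hdr) },
		{ .type = 9, .source_id = 1, .len = sizeof(struct hdr) },
	};

	return walk(entries, sizeof(entries), 2);
}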
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
new file mode 100644
index 000000000000..814b19249616
--- /dev/null
+++ b/drivers/acpi/atomicio.c
@@ -0,0 +1,360 @@
+/*
+ * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
+ * accessing in atomic context.
+ *
+ * This is used by the NMI handler to access IO memory areas, because
+ * ioremap/iounmap cannot be used in an NMI handler. The IO memory area
+ * is pre-mapped in process context and accessed in the NMI handler.
+ *
+ * Copyright (C) 2009-2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/kref.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
+#include <acpi/atomicio.h>
+
+#define ACPI_PFX "ACPI: "
+
+static LIST_HEAD(acpi_iomaps);
+/*
+ * Used for mutual exclusion between writers of the acpi_iomaps list;
+ * for synchronization between readers and writers, RCU is used.
+ */
+static DEFINE_SPINLOCK(acpi_iomaps_lock);
+
+struct acpi_iomap {
+ struct list_head list;
+ void __iomem *vaddr;
+ unsigned long size;
+ phys_addr_t paddr;
+ struct kref ref;
+};
+
+/* acpi_iomaps_lock or RCU read lock must be held before calling */
+static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ list_for_each_entry_rcu(map, &acpi_iomaps, list) {
+ if (map->paddr + map->size >= paddr + size &&
+ map->paddr <= paddr)
+ return map;
+ }
+ return NULL;
+}
+
+/*
+ * Atomic "ioremap" used by the NMI handler; if the specified IO memory
+ * area is not pre-mapped, NULL will be returned.
+ *
+ * acpi_iomaps_lock or RCU read lock must be held before calling
+ */
+static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ map = __acpi_find_iomap(paddr, size);
+ if (map)
+ return map->vaddr + (paddr - map->paddr);
+ else
+ return NULL;
+}
+
+/* acpi_iomaps_lock must be held before calling */
+static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ map = __acpi_find_iomap(paddr, size);
+ if (map) {
+ kref_get(&map->ref);
+ return map->vaddr + (paddr - map->paddr);
+ } else
+ return NULL;
+}
+
+/*
+ * Used to pre-map the specified IO memory area. First check whether
+ * the area is already pre-mapped; if it is, increase the reference
+ * count (in __acpi_try_ioremap) and return. Otherwise, do the real
+ * ioremap and add the mapping to the acpi_iomaps list.
+ */
+static void __iomem *acpi_pre_map(phys_addr_t paddr,
+ unsigned long size)
+{
+ void __iomem *vaddr;
+ struct acpi_iomap *map;
+ unsigned long pg_sz, flags;
+ phys_addr_t pg_off;
+
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ vaddr = __acpi_try_ioremap(paddr, size);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+ if (vaddr)
+ return vaddr;
+
+ pg_off = paddr & PAGE_MASK;
+ pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
+ vaddr = ioremap(pg_off, pg_sz);
+ if (!vaddr)
+ return NULL;
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ goto err_unmap;
+ INIT_LIST_HEAD(&map->list);
+ map->paddr = pg_off;
+ map->size = pg_sz;
+ map->vaddr = vaddr;
+ kref_init(&map->ref);
+
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ vaddr = __acpi_try_ioremap(paddr, size);
+ if (vaddr) {
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+ iounmap(map->vaddr);
+ kfree(map);
+ return vaddr;
+ }
+ list_add_tail_rcu(&map->list, &acpi_iomaps);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+
+ return vaddr + (paddr - pg_off);
+err_unmap:
+ iounmap(vaddr);
+ return NULL;
+}
+
+/* acpi_iomaps_lock must be held before calling */
+static void __acpi_kref_del_iomap(struct kref *ref)
+{
+ struct acpi_iomap *map;
+
+ map = container_of(ref, struct acpi_iomap, ref);
+ list_del_rcu(&map->list);
+}
+
+/*
+ * Used to post-unmap the specified IO memory area. The iounmap is
+ * done only when the reference count drops to zero.
+ */
+static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
+{
+ struct acpi_iomap *map;
+ unsigned long flags;
+ int del;
+
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ map = __acpi_find_iomap(paddr, size);
+ BUG_ON(!map);
+ del = kref_put(&map->ref, __acpi_kref_del_iomap);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+
+ if (!del)
+ return;
+
+ synchronize_rcu();
+ iounmap(map->vaddr);
+ kfree(map);
+}
+
+/* In NMI handler, should set silent = 1 */
+static int acpi_check_gar(struct acpi_generic_address *reg,
+ u64 *paddr, int silent)
+{
+ u32 width, space_id;
+
+ width = reg->bit_width;
+ space_id = reg->space_id;
+ /* Handle possible alignment issues */
+ memcpy(paddr, &reg->address, sizeof(*paddr));
+ if (!*paddr) {
+ if (!silent)
+ pr_warning(FW_BUG ACPI_PFX
+ "Invalid physical address in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
+ if (!silent)
+ pr_warning(FW_BUG ACPI_PFX
+ "Invalid bit width in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+ space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
+ if (!silent)
+ pr_warning(FW_BUG ACPI_PFX
+ "Invalid address space type in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Pre-map, working on GAR */
+int acpi_pre_map_gar(struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ void __iomem *vaddr;
+ int rc;
+
+ if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return 0;
+
+ rc = acpi_check_gar(reg, &paddr, 0);
+ if (rc)
+ return rc;
+
+ vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
+ if (!vaddr)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_pre_map_gar);
+
+/* Post-unmap, working on GAR */
+int acpi_post_unmap_gar(struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return 0;
+
+ rc = acpi_check_gar(reg, &paddr, 0);
+ if (rc)
+ return rc;
+
+ acpi_post_unmap(paddr, reg->bit_width / 8);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
+
+/*
+ * Can be used in atomic (including NMI) or process context. The RCU
+ * read lock can only be released after the IO memory area has been
+ * accessed.
+ */
+static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
+{
+ void __iomem *addr;
+
+ rcu_read_lock();
+ addr = __acpi_ioremap_fast(paddr, width);
+ switch (width) {
+ case 8:
+ *val = readb(addr);
+ break;
+ case 16:
+ *val = readw(addr);
+ break;
+ case 32:
+ *val = readl(addr);
+ break;
+ case 64:
+ *val = readq(addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
+{
+ void __iomem *addr;
+
+ rcu_read_lock();
+ addr = __acpi_ioremap_fast(paddr, width);
+ switch (width) {
+ case 8:
+ writeb(val, addr);
+ break;
+ case 16:
+ writew(val, addr);
+ break;
+ case 32:
+ writel(val, addr);
+ break;
+ case 64:
+ writeq(val, addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+/* GAR accessing in atomic (including NMI) or process context */
+int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ rc = acpi_check_gar(reg, &paddr, 1);
+ if (rc)
+ return rc;
+
+ *val = 0;
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return acpi_atomic_read_mem(paddr, val, reg->bit_width);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_atomic_read);
+
+int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ rc = acpi_check_gar(reg, &paddr, 1);
+ if (rc)
+ return rc;
+
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return acpi_atomic_write_mem(paddr, val, reg->bit_width);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return acpi_os_write_port(paddr, val, reg->bit_width);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_atomic_write);
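acpi_pre_map() above rounds the requested physical range out to whole pages before calling ioremap(): pg_off is the page boundary containing paddr, pg_sz covers everything up to the page boundary after paddr + size, and the returned virtual address is then offset back into the page. A small arithmetic sketch of that rounding follows, with PAGE_SIZE hard-coded to 4096 for illustration.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t paddr = 0x12345678ULL;	/* arbitrary example address */
	uint64_t size = 16;
	uint64_t pg_off, pg_sz;

	pg_off = paddr & PAGE_MASK;	/* round down to a page boundary */
	pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;

	printf("ioremap [0x%llx, +0x%llx), then use vaddr + 0x%llx\n",
	       (unsigned long long)pg_off,
	       (unsigned long long)pg_sz,
	       (unsigned long long)(paddr - pg_off));
	return 0;
}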
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 9042a8579668..c1d23cd71652 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -401,11 +401,6 @@ static void acpi_print_osc_error(acpi_handle handle,
printk("\n");
}
-static u8 hex_val(unsigned char c)
-{
- return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
-}
-
static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
{
int i;
@@ -422,8 +417,8 @@ static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
return AE_BAD_PARAMETER;
}
for (i = 0; i < 16; i++) {
- uuid[i] = hex_val(str[opc_map_to_uuid[i]]) << 4;
- uuid[i] |= hex_val(str[opc_map_to_uuid[i] + 1]);
+ uuid[i] = hex_to_bin(str[opc_map_to_uuid[i]]) << 4;
+ uuid[i] |= hex_to_bin(str[opc_map_to_uuid[i] + 1]);
}
return AE_OK;
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f2234db85da0..5f2027d782e8 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -79,7 +79,7 @@ enum {
EC_FLAGS_GPE_STORM, /* GPE storm detected */
EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and
* OpReg are installed */
- EC_FLAGS_FROZEN, /* Transactions are suspended */
+ EC_FLAGS_BLOCKED, /* Transactions are blocked */
};
/* If we find an EC via the ECDT, we need to keep a ptr to its context */
@@ -293,7 +293,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
if (t->rdata)
memset(t->rdata, 0, t->rlen);
mutex_lock(&ec->lock);
- if (test_bit(EC_FLAGS_FROZEN, &ec->flags)) {
+ if (test_bit(EC_FLAGS_BLOCKED, &ec->flags)) {
status = -EINVAL;
goto unlock;
}
@@ -459,7 +459,7 @@ int ec_transaction(u8 command,
EXPORT_SYMBOL(ec_transaction);
-void acpi_ec_suspend_transactions(void)
+void acpi_ec_block_transactions(void)
{
struct acpi_ec *ec = first_ec;
@@ -468,11 +468,11 @@ void acpi_ec_suspend_transactions(void)
mutex_lock(&ec->lock);
/* Prevent transactions from being carried out */
- set_bit(EC_FLAGS_FROZEN, &ec->flags);
+ set_bit(EC_FLAGS_BLOCKED, &ec->flags);
mutex_unlock(&ec->lock);
}
-void acpi_ec_resume_transactions(void)
+void acpi_ec_unblock_transactions(void)
{
struct acpi_ec *ec = first_ec;
@@ -481,10 +481,20 @@ void acpi_ec_resume_transactions(void)
mutex_lock(&ec->lock);
/* Allow transactions to be carried out again */
- clear_bit(EC_FLAGS_FROZEN, &ec->flags);
+ clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
mutex_unlock(&ec->lock);
}
+void acpi_ec_unblock_transactions_early(void)
+{
+ /*
+ * Allow transactions to happen again (this function is called from
+ * atomic context during wakeup, so we don't need to acquire the mutex).
+ */
+ if (first_ec)
+ clear_bit(EC_FLAGS_BLOCKED, &first_ec->flags);
+}
+
static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data)
{
int result;
@@ -1027,10 +1037,9 @@ int __init acpi_ec_ecdt_probe(void)
/* Don't trust ECDT, which comes from ASUSTek */
if (!EC_FLAGS_VALIDATE_ECDT)
goto install;
- saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL);
+ saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
if (!saved_ec)
return -ENOMEM;
- memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec));
/* fall through */
}
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
new file mode 100644
index 000000000000..d0c1967f7597
--- /dev/null
+++ b/drivers/acpi/hed.c
@@ -0,0 +1,112 @@
+/*
+ * ACPI Hardware Error Device (PNP0C33) Driver
+ *
+ * Copyright (C) 2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * The ACPI Hardware Error Device is used to report hardware errors
+ * notified via SCI, mainly corrected errors.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/hed.h>
+
+static struct acpi_device_id acpi_hed_ids[] = {
+ {"PNP0C33", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, acpi_hed_ids);
+
+static acpi_handle hed_handle;
+
+static BLOCKING_NOTIFIER_HEAD(acpi_hed_notify_list);
+
+int register_acpi_hed_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&acpi_hed_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(register_acpi_hed_notifier);
+
+void unregister_acpi_hed_notifier(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&acpi_hed_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_acpi_hed_notifier);
+
+/*
+ * An SCI reporting a hardware error is forwarded to the listeners of
+ * HED; it is used by HEST Generic Hardware Error Sources with
+ * notification type SCI.
+ */
+static void acpi_hed_notify(struct acpi_device *device, u32 event)
+{
+ blocking_notifier_call_chain(&acpi_hed_notify_list, 0, NULL);
+}
+
+static int __devinit acpi_hed_add(struct acpi_device *device)
+{
+ /* Only one hardware error device */
+ if (hed_handle)
+ return -EINVAL;
+ hed_handle = device->handle;
+ return 0;
+}
+
+static int __devexit acpi_hed_remove(struct acpi_device *device, int type)
+{
+ hed_handle = NULL;
+ return 0;
+}
+
+static struct acpi_driver acpi_hed_driver = {
+ .name = "hardware_error_device",
+ .class = "hardware_error",
+ .ids = acpi_hed_ids,
+ .ops = {
+ .add = acpi_hed_add,
+ .remove = acpi_hed_remove,
+ .notify = acpi_hed_notify,
+ },
+};
+
+static int __init acpi_hed_init(void)
+{
+ if (acpi_disabled)
+ return -ENODEV;
+
+ if (acpi_bus_register_driver(&acpi_hed_driver) < 0)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit acpi_hed_exit(void)
+{
+ acpi_bus_unregister_driver(&acpi_hed_driver);
+}
+
+module_init(acpi_hed_init);
+module_exit(acpi_hed_exit);
+
+ACPI_MODULE_NAME("hed");
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");
+MODULE_LICENSE("GPL");
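The HED driver above fans every PNP0C33 notification out to its listeners through a blocking notifier chain; GHES registers ghes_notifier_sci on that chain when the first SCI-notified error source is parsed. Below is a tiny model of the fan-out, with the chain reduced to a fixed array of callbacks rather than the kernel notifier API; names and layout are assumptions for illustration only.

#include <stdio.h>

#define NOTIFY_DONE 0
#define NOTIFY_OK 1

typedef int (*notifier_fn)(unsigned long event, void *data);

/* Model of a notifier chain; the real one is a linked list behind a rwsem. */
static notifier_fn chain[4];
static int chain_len;

static void chain_register(notifier_fn fn)
{
	if (chain_len < 4)
		chain[chain_len++] = fn;
}

static int chain_call(unsigned long event, void *data)
{
	int i, ret = NOTIFY_DONE;

	for (i = 0; i < chain_len; i++)
		ret = chain[i](event, data);
	return ret;
}

/* Plays the role of ghes_notify_sci(): scan the SCI-notified error sources. */
static int ghes_listener(unsigned long event, void *data)
{
	(void)data;
	printf("HED event %lu: polling SCI-notified error sources\n", event);
	return NOTIFY_OK;
}

int main(void)
{
	chain_register(ghes_listener);	/* like register_acpi_hed_notifier() */
	return chain_call(0, NULL) == NOTIFY_OK ? 0 : 1;	/* like acpi_hed_notify() */
}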
diff --git a/drivers/acpi/hest.c b/drivers/acpi/hest.c
deleted file mode 100644
index 1c527a192872..000000000000
--- a/drivers/acpi/hest.c
+++ /dev/null
@@ -1,139 +0,0 @@
-#include <linux/acpi.h>
-#include <linux/pci.h>
-
-#define PREFIX "ACPI: "
-
-static inline unsigned long parse_acpi_hest_ia_machine_check(struct acpi_hest_ia_machine_check *p)
-{
- return sizeof(*p) +
- (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
-}
-
-static inline unsigned long parse_acpi_hest_ia_corrected(struct acpi_hest_ia_corrected *p)
-{
- return sizeof(*p) +
- (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
-}
-
-static inline unsigned long parse_acpi_hest_ia_nmi(struct acpi_hest_ia_nmi *p)
-{
- return sizeof(*p);
-}
-
-static inline unsigned long parse_acpi_hest_generic(struct acpi_hest_generic *p)
-{
- return sizeof(*p);
-}
-
-static inline unsigned int hest_match_pci(struct acpi_hest_aer_common *p, struct pci_dev *pci)
-{
- return (0 == pci_domain_nr(pci->bus) &&
- p->bus == pci->bus->number &&
- p->device == PCI_SLOT(pci->devfn) &&
- p->function == PCI_FUNC(pci->devfn));
-}
-
-static unsigned long parse_acpi_hest_aer(void *hdr, int type, struct pci_dev *pci, int *firmware_first)
-{
- struct acpi_hest_aer_common *p = hdr + sizeof(struct acpi_hest_header);
- unsigned long rc=0;
- u8 pcie_type = 0;
- u8 bridge = 0;
- switch (type) {
- case ACPI_HEST_TYPE_AER_ROOT_PORT:
- rc = sizeof(struct acpi_hest_aer_root);
- pcie_type = PCI_EXP_TYPE_ROOT_PORT;
- break;
- case ACPI_HEST_TYPE_AER_ENDPOINT:
- rc = sizeof(struct acpi_hest_aer);
- pcie_type = PCI_EXP_TYPE_ENDPOINT;
- break;
- case ACPI_HEST_TYPE_AER_BRIDGE:
- rc = sizeof(struct acpi_hest_aer_bridge);
- if ((pci->class >> 16) == PCI_BASE_CLASS_BRIDGE)
- bridge = 1;
- break;
- }
-
- if (p->flags & ACPI_HEST_GLOBAL) {
- if ((pci->is_pcie && (pci->pcie_type == pcie_type)) || bridge)
- *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
- }
- else
- if (hest_match_pci(p, pci))
- *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
- return rc;
-}
-
-static int acpi_hest_firmware_first(struct acpi_table_header *stdheader, struct pci_dev *pci)
-{
- struct acpi_table_hest *hest = (struct acpi_table_hest *)stdheader;
- void *p = (void *)hest + sizeof(*hest); /* defined by the ACPI 4.0 spec */
- struct acpi_hest_header *hdr = p;
-
- int i;
- int firmware_first = 0;
- static unsigned char printed_unused = 0;
- static unsigned char printed_reserved = 0;
-
- for (i=0, hdr=p; p < (((void *)hest) + hest->header.length) && i < hest->error_source_count; i++) {
- switch (hdr->type) {
- case ACPI_HEST_TYPE_IA32_CHECK:
- p += parse_acpi_hest_ia_machine_check(p);
- break;
- case ACPI_HEST_TYPE_IA32_CORRECTED_CHECK:
- p += parse_acpi_hest_ia_corrected(p);
- break;
- case ACPI_HEST_TYPE_IA32_NMI:
- p += parse_acpi_hest_ia_nmi(p);
- break;
- /* These three should never appear */
- case ACPI_HEST_TYPE_NOT_USED3:
- case ACPI_HEST_TYPE_NOT_USED4:
- case ACPI_HEST_TYPE_NOT_USED5:
- if (!printed_unused) {
- printk(KERN_DEBUG PREFIX
- "HEST Error Source list contains an obsolete type (%d).\n", hdr->type);
- printed_unused = 1;
- }
- break;
- case ACPI_HEST_TYPE_AER_ROOT_PORT:
- case ACPI_HEST_TYPE_AER_ENDPOINT:
- case ACPI_HEST_TYPE_AER_BRIDGE:
- p += parse_acpi_hest_aer(p, hdr->type, pci, &firmware_first);
- break;
- case ACPI_HEST_TYPE_GENERIC_ERROR:
- p += parse_acpi_hest_generic(p);
- break;
- /* These should never appear either */
- case ACPI_HEST_TYPE_RESERVED:
- default:
- if (!printed_reserved) {
- printk(KERN_DEBUG PREFIX
- "HEST Error Source list contains a reserved type (%d).\n", hdr->type);
- printed_reserved = 1;
- }
- break;
- }
- }
- return firmware_first;
-}
-
-int acpi_hest_firmware_first_pci(struct pci_dev *pci)
-{
- acpi_status status = AE_NOT_FOUND;
- struct acpi_table_header *hest = NULL;
-
- if (acpi_disabled)
- return 0;
-
- status = acpi_get_table(ACPI_SIG_HEST, 1, &hest);
-
- if (ACPI_SUCCESS(status)) {
- if (acpi_hest_firmware_first(hest, pci)) {
- return 1;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(acpi_hest_firmware_first_pci);
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index e28411367239..f8f190ec066e 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -49,8 +49,9 @@ void acpi_early_processor_set_pdc(void);
int acpi_ec_init(void);
int acpi_ec_ecdt_probe(void);
int acpi_boot_ec_enable(void);
-void acpi_ec_suspend_transactions(void);
-void acpi_ec_resume_transactions(void);
+void acpi_ec_block_transactions(void);
+void acpi_ec_unblock_transactions(void);
+void acpi_ec_unblock_transactions_early(void);
/*--------------------------------------------------------------------------
Suspend/Resume
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 4bc1c4178f50..78418ce4fc78 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1207,6 +1207,15 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
EXPORT_SYMBOL(acpi_check_mem_region);
/*
+ * Let drivers know whether the resource checks are effective
+ */
+int acpi_resources_are_enforced(void)
+{
+ return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
+}
+EXPORT_SYMBOL(acpi_resources_are_enforced);
+
+/*
* Acquire a spinlock.
*
* handle is a pointer to the spinlock_t.
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index aefce33f2a09..4eac59393edc 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -120,7 +120,8 @@ acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
struct acpi_pci_root *root;
list_for_each_entry(root, &acpi_pci_roots, node)
- if ((root->segment == (u16) seg) && (root->bus_nr == (u16) bus))
+ if ((root->segment == (u16) seg) &&
+ (root->secondary.start == (u16) bus))
return root->device->handle;
return NULL;
}
@@ -154,7 +155,7 @@ EXPORT_SYMBOL_GPL(acpi_is_root_bridge);
static acpi_status
get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
{
- int *busnr = data;
+ struct resource *res = data;
struct acpi_resource_address64 address;
if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 &&
@@ -164,28 +165,27 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
acpi_resource_to_address64(resource, &address);
if ((address.address_length > 0) &&
- (address.resource_type == ACPI_BUS_NUMBER_RANGE))
- *busnr = address.minimum;
+ (address.resource_type == ACPI_BUS_NUMBER_RANGE)) {
+ res->start = address.minimum;
+ res->end = address.minimum + address.address_length - 1;
+ }
return AE_OK;
}
static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
- unsigned long long *bus)
+ struct resource *res)
{
acpi_status status;
- int busnum;
- busnum = -1;
+ res->start = -1;
status =
acpi_walk_resources(handle, METHOD_NAME__CRS,
- get_root_bridge_busnr_callback, &busnum);
+ get_root_bridge_busnr_callback, res);
if (ACPI_FAILURE(status))
return status;
- /* Check if we really get a bus number from _CRS */
- if (busnum == -1)
+ if (res->start == -1)
return AE_ERROR;
- *bus = busnum;
return AE_OK;
}
@@ -429,34 +429,47 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
struct acpi_device *child;
u32 flags, base_flags;
+ root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
+ if (!root)
+ return -ENOMEM;
+
segment = 0;
status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
&segment);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
printk(KERN_ERR PREFIX "can't evaluate _SEG\n");
- return -ENODEV;
+ result = -ENODEV;
+ goto end;
}
/* Check _CRS first, then _BBN. If no _BBN, default to zero. */
- bus = 0;
- status = try_get_root_bridge_busnr(device->handle, &bus);
+ root->secondary.flags = IORESOURCE_BUS;
+ status = try_get_root_bridge_busnr(device->handle, &root->secondary);
if (ACPI_FAILURE(status)) {
+ /*
+ * We need both the start and end of the downstream bus range
+ * to interpret _CBA (MMCONFIG base address), so it really is
+ * supposed to be in _CRS. If we don't find it there, all we
+ * can do is assume [_BBN-0xFF] or [0-0xFF].
+ */
+ root->secondary.end = 0xFF;
+ printk(KERN_WARNING FW_BUG PREFIX
+ "no secondary bus range in _CRS\n");
status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- printk(KERN_ERR PREFIX
- "no bus number in _CRS and can't evaluate _BBN\n");
- return -ENODEV;
+ if (ACPI_SUCCESS(status))
+ root->secondary.start = bus;
+ else if (status == AE_NOT_FOUND)
+ root->secondary.start = 0;
+ else {
+ printk(KERN_ERR PREFIX "can't evaluate _BBN\n");
+ result = -ENODEV;
+ goto end;
}
}
- root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
- if (!root)
- return -ENOMEM;
-
INIT_LIST_HEAD(&root->node);
root->device = device;
root->segment = segment & 0xFFFF;
- root->bus_nr = bus & 0xFF;
strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
@@ -475,9 +488,9 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
/* TBD: Locking */
list_add_tail(&root->node, &acpi_pci_roots);
- printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n",
+ printk(KERN_INFO PREFIX "%s [%s] (domain %04x %pR)\n",
acpi_device_name(device), acpi_device_bid(device),
- root->segment, root->bus_nr);
+ root->segment, &root->secondary);
/*
* Scan the Root Bridge
@@ -486,11 +499,11 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
* PCI namespace does not get created until this call is made (and
* thus the root bridge's pci_dev does not exist).
*/
- root->bus = pci_acpi_scan_root(device, segment, bus);
+ root->bus = pci_acpi_scan_root(root);
if (!root->bus) {
printk(KERN_ERR PREFIX
"Bus %04x:%02x not present in PCI namespace\n",
- root->segment, root->bus_nr);
+ root->segment, (unsigned int)root->secondary.start);
result = -ENODEV;
goto end;
}
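The reworked acpi_pci_root_add() above prefers the downstream bus range reported by _CRS and only falls back to [_BBN, 0xFF] (or [0, 0xFF] without _BBN) when _CRS carries no bus number range. A compact sketch of that fallback decision follows, with the ACPI evaluations replaced by plain flags; it illustrates the policy only, not the driver code.

#include <stdio.h>

struct bus_range { unsigned int start, end; };

/*
 * Assumed model of the fallback: crs_ok means _CRS supplied a bus number
 * range, bbn_ok means _BBN evaluated successfully.
 */
static void root_bus_range(int crs_ok, unsigned int crs_start, unsigned int crs_end,
			   int bbn_ok, unsigned int bbn, struct bus_range *r)
{
	if (crs_ok) {
		r->start = crs_start;
		r->end = crs_end;
		return;
	}
	r->end = 0xFF;			/* worst case: assume the whole range */
	r->start = bbn_ok ? bbn : 0;
}

int main(void)
{
	struct bus_range r;

	root_bus_range(0, 0, 0, 1, 0x40, &r);
	printf("secondary bus range [%02x-%02x]\n", r.start, r.end);	/* [40-ff] */
	return 0;
}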
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 5675d9747e87..b1034a9ada4e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -616,7 +616,8 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
acpi_processor_get_limit_info(pr);
- acpi_processor_power_init(pr, device);
+ if (cpuidle_get_driver() == &acpi_idle_driver)
+ acpi_processor_power_init(pr, device);
pr->cdev = thermal_cooling_device_register("Processor", device,
&processor_cooling_ops);
@@ -920,9 +921,14 @@ static int __init acpi_processor_init(void)
if (!acpi_processor_dir)
return -ENOMEM;
#endif
- result = cpuidle_register_driver(&acpi_idle_driver);
- if (result < 0)
- goto out_proc;
+
+ if (!cpuidle_register_driver(&acpi_idle_driver)) {
+ printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
+ acpi_idle_driver.name);
+ } else {
+ printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s",
+ cpuidle_get_driver()->name);
+ }
result = acpi_bus_register_driver(&acpi_processor_driver);
if (result < 0)
@@ -941,7 +947,6 @@ static int __init acpi_processor_init(void)
out_cpuidle:
cpuidle_unregister_driver(&acpi_idle_driver);
-out_proc:
#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
#endif
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index c3817e1f32c7..b1b385692f46 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -80,7 +80,7 @@ module_param(nocst, uint, 0000);
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
-static s64 us_to_pm_timer_ticks(s64 t)
+static u64 us_to_pm_timer_ticks(s64 t)
{
return div64_u64(t * PM_TIMER_FREQUENCY, 1000000);
}
@@ -727,24 +727,14 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
break;
}
- if (pr->power.states[i].promotion.state)
- seq_printf(seq, "promotion[C%zd] ",
- (pr->power.states[i].promotion.state -
- pr->power.states));
- else
- seq_puts(seq, "promotion[--] ");
-
- if (pr->power.states[i].demotion.state)
- seq_printf(seq, "demotion[C%zd] ",
- (pr->power.states[i].demotion.state -
- pr->power.states));
- else
- seq_puts(seq, "demotion[--] ");
-
- seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
+ seq_puts(seq, "promotion[--] ");
+
+ seq_puts(seq, "demotion[--] ");
+
+ seq_printf(seq, "latency[%03d] usage[%08d] duration[%020Lu]\n",
pr->power.states[i].latency,
pr->power.states[i].usage,
- (unsigned long long)pr->power.states[i].time);
+ us_to_pm_timer_ticks(pr->power.states[i].time));
}
end:
@@ -869,8 +859,8 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
struct acpi_processor *pr;
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
ktime_t kt1, kt2;
+ s64 idle_time_ns;
s64 idle_time;
- s64 sleep_ticks = 0;
pr = __get_cpu_var(processors);
@@ -881,6 +871,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
return(acpi_idle_enter_c1(dev, state));
local_irq_disable();
+
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -888,12 +879,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
* NEED_RESCHED:
*/
smp_mb();
- }
- if (unlikely(need_resched())) {
- current_thread_info()->status |= TS_POLLING;
- local_irq_enable();
- return 0;
+ if (unlikely(need_resched())) {
+ current_thread_info()->status |= TS_POLLING;
+ local_irq_enable();
+ return 0;
+ }
}
/*
@@ -910,20 +901,21 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
sched_clock_idle_sleep_event();
acpi_idle_do_entry(cx);
kt2 = ktime_get_real();
- idle_time = ktime_to_us(ktime_sub(kt2, kt1));
-
- sleep_ticks = us_to_pm_timer_ticks(idle_time);
+ idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
+ idle_time = idle_time_ns;
+ do_div(idle_time, NSEC_PER_USEC);
/* Tell the scheduler how much we idled: */
- sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
+ sched_clock_idle_wakeup_event(idle_time_ns);
local_irq_enable();
- current_thread_info()->status |= TS_POLLING;
+ if (cx->entry_method != ACPI_CSTATE_FFH)
+ current_thread_info()->status |= TS_POLLING;
cx->usage++;
lapic_timer_state_broadcast(pr, cx, 0);
- cx->time += sleep_ticks;
+ cx->time += idle_time;
return idle_time;
}
@@ -943,8 +935,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct acpi_processor *pr;
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
ktime_t kt1, kt2;
+ s64 idle_time_ns;
s64 idle_time;
- s64 sleep_ticks = 0;
pr = __get_cpu_var(processors);
@@ -968,6 +960,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
}
local_irq_disable();
+
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -975,12 +968,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
* NEED_RESCHED:
*/
smp_mb();
- }
- if (unlikely(need_resched())) {
- current_thread_info()->status |= TS_POLLING;
- local_irq_enable();
- return 0;
+ if (unlikely(need_resched())) {
+ current_thread_info()->status |= TS_POLLING;
+ local_irq_enable();
+ return 0;
+ }
}
acpi_unlazy_tlb(smp_processor_id());
@@ -1025,19 +1018,21 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
spin_unlock(&c3_lock);
}
kt2 = ktime_get_real();
- idle_time = ktime_to_us(ktime_sub(kt2, kt1));
+ idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
+ idle_time = idle_time_ns;
+ do_div(idle_time, NSEC_PER_USEC);
- sleep_ticks = us_to_pm_timer_ticks(idle_time);
/* Tell the scheduler how much we idled: */
- sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
+ sched_clock_idle_wakeup_event(idle_time_ns);
local_irq_enable();
- current_thread_info()->status |= TS_POLLING;
+ if (cx->entry_method != ACPI_CSTATE_FFH)
+ current_thread_info()->status |= TS_POLLING;
cx->usage++;
lapic_timer_state_broadcast(pr, cx, 0);
- cx->time += sleep_ticks;
+ cx->time += idle_time;
return idle_time;
}
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index baa76bbf244a..3fb4bdea7e06 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -80,22 +80,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
-/*
- * According to the ACPI specification the BIOS should make sure that ACPI is
- * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
- * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
- * on such systems during resume. Unfortunately that doesn't help in
- * particularly pathological cases in which SCI_EN has to be set directly on
- * resume, although the specification states very clearly that this flag is
- * owned by the hardware. The set_sci_en_on_resume variable will be set in such
- * cases.
- */
-static bool set_sci_en_on_resume;
-
-void __init acpi_set_sci_en_on_resume(void)
-{
- set_sci_en_on_resume = true;
-}
/*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
@@ -110,11 +94,13 @@ void __init acpi_old_suspend_ordering(void)
}
/**
- * acpi_pm_disable_gpes - Disable the GPEs.
+ * acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
*/
-static int acpi_pm_disable_gpes(void)
+static int acpi_pm_freeze(void)
{
acpi_disable_all_gpes();
+ acpi_os_wait_events_complete(NULL);
+ acpi_ec_block_transactions();
return 0;
}
@@ -142,7 +128,8 @@ static int acpi_pm_prepare(void)
int error = __acpi_pm_prepare();
if (!error)
- acpi_disable_all_gpes();
+ acpi_pm_freeze();
+
return error;
}
@@ -253,11 +240,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
break;
}
- /* If ACPI is not enabled by the BIOS, we need to enable it here. */
- if (set_sci_en_on_resume)
- acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
- else
- acpi_enable();
+ /* This violates the spec but is required for bug compatibility. */
+ acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
/* Reprogram control registers and execute _BFS */
acpi_leave_sleep_state_prep(acpi_state);
@@ -275,6 +259,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
* acpi_leave_sleep_state will reenable specific GPEs later
*/
acpi_disable_all_gpes();
+ /* Allow EC transactions to happen. */
+ acpi_ec_unblock_transactions_early();
local_irq_restore(flags);
printk(KERN_DEBUG "Back to C!\n");
@@ -286,6 +272,12 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
+static void acpi_suspend_finish(void)
+{
+ acpi_ec_unblock_transactions();
+ acpi_pm_finish();
+}
+
static int acpi_suspend_state_valid(suspend_state_t pm_state)
{
u32 acpi_state;
@@ -307,7 +299,7 @@ static struct platform_suspend_ops acpi_suspend_ops = {
.begin = acpi_suspend_begin,
.prepare_late = acpi_pm_prepare,
.enter = acpi_suspend_enter,
- .wake = acpi_pm_finish,
+ .wake = acpi_suspend_finish,
.end = acpi_pm_end,
};
@@ -333,9 +325,9 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
static struct platform_suspend_ops acpi_suspend_ops_old = {
.valid = acpi_suspend_state_valid,
.begin = acpi_suspend_begin_old,
- .prepare_late = acpi_pm_disable_gpes,
+ .prepare_late = acpi_pm_freeze,
.enter = acpi_suspend_enter,
- .wake = acpi_pm_finish,
+ .wake = acpi_suspend_finish,
.end = acpi_pm_end,
.recover = acpi_pm_finish,
};
@@ -346,12 +338,6 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
return 0;
}
-static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
-{
- set_sci_en_on_resume = true;
- return 0;
-}
-
static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
{
.callback = init_old_suspend_ordering,
@@ -370,22 +356,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
},
{
- .callback = init_set_sci_en_on_resume,
- .ident = "Apple MacBook 1,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Apple MacMini 1,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
- },
- },
- {
.callback = init_old_suspend_ordering,
.ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
.matches = {
@@ -394,94 +364,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
},
{
- .callback = init_set_sci_en_on_resume,
- .ident = "Toshiba Satellite L300",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard HP G7000 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard HP Pavilion dv3 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv3 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Pavilion dv4",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Pavilion dv7",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Compaq Presario C700 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario CQ40 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad T410",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad T510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad W510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad X201[s]",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
- },
- },
- {
.callback = init_old_suspend_ordering,
.ident = "Panasonic CF51-2L",
.matches = {
@@ -490,30 +372,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
},
},
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Dell Studio 1558",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Dell Studio 1557",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Dell Studio 1555",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"),
- },
- },
{},
};
#endif /* CONFIG_SUSPEND */
@@ -586,6 +444,7 @@ static int acpi_hibernation_enter(void)
static void acpi_hibernation_finish(void)
{
hibernate_nvs_free();
+ acpi_ec_unblock_transactions();
acpi_pm_finish();
}
@@ -606,19 +465,13 @@ static void acpi_hibernation_leave(void)
}
/* Restore the NVS memory area */
hibernate_nvs_restore();
+ /* Allow EC transactions to happen. */
+ acpi_ec_unblock_transactions_early();
}
-static int acpi_pm_pre_restore(void)
+static void acpi_pm_thaw(void)
{
- acpi_disable_all_gpes();
- acpi_os_wait_events_complete(NULL);
- acpi_ec_suspend_transactions();
- return 0;
-}
-
-static void acpi_pm_restore_cleanup(void)
-{
- acpi_ec_resume_transactions();
+ acpi_ec_unblock_transactions();
acpi_enable_all_runtime_gpes();
}
@@ -630,8 +483,8 @@ static struct platform_hibernation_ops acpi_hibernation_ops = {
.prepare = acpi_pm_prepare,
.enter = acpi_hibernation_enter,
.leave = acpi_hibernation_leave,
- .pre_restore = acpi_pm_pre_restore,
- .restore_cleanup = acpi_pm_restore_cleanup,
+ .pre_restore = acpi_pm_freeze,
+ .restore_cleanup = acpi_pm_thaw,
};
/**
@@ -663,12 +516,9 @@ static int acpi_hibernation_begin_old(void)
static int acpi_hibernation_pre_snapshot_old(void)
{
- int error = acpi_pm_disable_gpes();
-
- if (!error)
- hibernate_nvs_save();
-
- return error;
+ acpi_pm_freeze();
+ hibernate_nvs_save();
+ return 0;
}
/*
@@ -680,11 +530,11 @@ static struct platform_hibernation_ops acpi_hibernation_ops_old = {
.end = acpi_pm_end,
.pre_snapshot = acpi_hibernation_pre_snapshot_old,
.finish = acpi_hibernation_finish,
- .prepare = acpi_pm_disable_gpes,
+ .prepare = acpi_pm_freeze,
.enter = acpi_hibernation_enter,
.leave = acpi_hibernation_leave,
- .pre_restore = acpi_pm_pre_restore,
- .restore_cleanup = acpi_pm_restore_cleanup,
+ .pre_restore = acpi_pm_freeze,
+ .restore_cleanup = acpi_pm_thaw,
.recover = acpi_pm_finish,
};
#endif /* CONFIG_HIBERNATION */
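The hibernation hooks above now funnel through a single freeze/thaw pair. acpi_pm_freeze() itself is defined outside this hunk; the sketch below only shows what it is assumed to do, mirroring the removed acpi_pm_pre_restore() but blocking EC transactions (acpi_ec_block_transactions() is assumed to be the counterpart of the unblock calls added above):

static int acpi_pm_freeze(void)
{
	/* Quiesce GPEs and let any in-flight ACPI events drain. */
	acpi_disable_all_gpes();
	acpi_os_wait_events_complete(NULL);

	/* Block EC transactions; acpi_pm_thaw() unblocks them again. */
	acpi_ec_block_transactions();
	return 0;
}
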
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 8a8f3b3382a6..25b8bd149284 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -1,6 +1,6 @@
extern u8 sleep_states[];
-extern int acpi_suspend (u32 state);
+extern int acpi_suspend(u32 state);
extern void acpi_enable_wakeup_device_prep(u8 sleep_state);
extern void acpi_enable_wakeup_device(u8 sleep_state);
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 8a0ed2800e63..f336bca7c450 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
unsigned long table_end;
acpi_size tbl_size;
- if (acpi_disabled && !acpi_ht)
+ if (acpi_disabled)
return -ENODEV;
if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
struct acpi_table_header *table = NULL;
acpi_size tbl_size;
- if (acpi_disabled && !acpi_ht)
+ if (acpi_disabled)
return -ENODEV;
if (!handler)
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index a0c93b321482..9865d46f49a8 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -45,6 +45,7 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/suspend.h>
+#include <acpi/video.h>
#define PREFIX "ACPI: "
@@ -65,11 +66,6 @@
#define MAX_NAME_LEN 20
-#define ACPI_VIDEO_DISPLAY_CRT 1
-#define ACPI_VIDEO_DISPLAY_TV 2
-#define ACPI_VIDEO_DISPLAY_DVI 3
-#define ACPI_VIDEO_DISPLAY_LCD 4
-
#define _COMPONENT ACPI_VIDEO_COMPONENT
ACPI_MODULE_NAME("video");
@@ -1007,11 +1003,11 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
result = acpi_video_init_brightness(device);
if (result)
return;
- name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name)
return;
+ count++;
- sprintf(name, "acpi_video%d", count++);
memset(&props, 0, sizeof(struct backlight_properties));
props.max_brightness = device->brightness->count - 3;
device->backlight = backlight_device_register(name, NULL, device,
@@ -1067,10 +1063,10 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
if (device->cap._DCS && device->cap._DSS) {
static int count;
char *name;
- name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name)
return;
- sprintf(name, "acpi_video%d", count++);
+ count++;
device->output_dev = video_output_register(name,
NULL, device, &acpi_output_properties);
kfree(name);
@@ -1748,11 +1744,27 @@ acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id
}
static int
+acpi_video_get_device_type(struct acpi_video_bus *video,
+ unsigned long device_id)
+{
+ struct acpi_video_enumerated_device *ids;
+ int i;
+
+ for (i = 0; i < video->attached_count; i++) {
+ ids = &video->attached_array[i];
+ if ((ids->value.int_val & 0xffff) == device_id)
+ return ids->value.int_val;
+ }
+
+ return 0;
+}
+
+static int
acpi_video_bus_get_one_device(struct acpi_device *device,
struct acpi_video_bus *video)
{
unsigned long long device_id;
- int status;
+ int status, device_type;
struct acpi_video_device *data;
struct acpi_video_device_attrib* attribute;
@@ -1797,8 +1809,25 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
}
if(attribute->bios_can_detect)
data->flags.bios = 1;
- } else
- data->flags.unknown = 1;
+ } else {
+ /* Check for legacy IDs */
+ device_type = acpi_video_get_device_type(video,
+ device_id);
+ /* Ignore bits 16 and 18-20 */
+ switch (device_type & 0xffe2ffff) {
+ case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR:
+ data->flags.crt = 1;
+ break;
+ case ACPI_VIDEO_DISPLAY_LEGACY_PANEL:
+ data->flags.lcd = 1;
+ break;
+ case ACPI_VIDEO_DISPLAY_LEGACY_TV:
+ data->flags.tvout = 1;
+ break;
+ default:
+ data->flags.unknown = 1;
+ }
+ }
acpi_video_device_bind(video, data);
acpi_video_device_find_cap(data);
@@ -2032,6 +2061,71 @@ out:
return result;
}
+int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
+ void **edid)
+{
+ struct acpi_video_bus *video;
+ struct acpi_video_device *video_device;
+ union acpi_object *buffer = NULL;
+ acpi_status status;
+ int i, length;
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ video = acpi_driver_data(device);
+
+ for (i = 0; i < video->attached_count; i++) {
+ video_device = video->attached_array[i].bind_info;
+ length = 256;
+
+ if (!video_device)
+ continue;
+
+ if (type) {
+ switch (type) {
+ case ACPI_VIDEO_DISPLAY_CRT:
+ if (!video_device->flags.crt)
+ continue;
+ break;
+ case ACPI_VIDEO_DISPLAY_TV:
+ if (!video_device->flags.tvout)
+ continue;
+ break;
+ case ACPI_VIDEO_DISPLAY_DVI:
+ if (!video_device->flags.dvi)
+ continue;
+ break;
+ case ACPI_VIDEO_DISPLAY_LCD:
+ if (!video_device->flags.lcd)
+ continue;
+ break;
+ }
+ } else if (video_device->device_id != device_id) {
+ continue;
+ }
+
+ status = acpi_video_device_EDID(video_device, &buffer, length);
+
+ if (ACPI_FAILURE(status) || !buffer ||
+ buffer->type != ACPI_TYPE_BUFFER) {
+ length = 128;
+ status = acpi_video_device_EDID(video_device, &buffer,
+ length);
+ if (ACPI_FAILURE(status) || !buffer ||
+ buffer->type != ACPI_TYPE_BUFFER) {
+ continue;
+ }
+ }
+
+ *edid = buffer->buffer.pointer;
+ return length;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(acpi_video_get_edid);
+
static int
acpi_video_bus_get_devices(struct acpi_video_bus *video,
struct acpi_device *device)
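
The new acpi_video_get_edid() export above gives graphics drivers a way to pull a panel's EDID out of the ACPI video bus. A minimal usage sketch, assuming the caller already holds the acpi_device for the video bus; the helper name fetch_acpi_lcd_edid() is illustrative only:

#include <linux/acpi.h>
#include <acpi/video.h>

/* Sketch: fetch the EDID of the first LCD panel attached to @adev. */
static int fetch_acpi_lcd_edid(struct acpi_device *adev)
{
	void *edid = NULL;
	int len;

	/* A non-zero type selects by legacy display class; device_id is then ignored. */
	len = acpi_video_get_edid(adev, ACPI_VIDEO_DISPLAY_LCD, 0, &edid);
	if (len < 0)
		return len;	/* -EINVAL or -ENODEV */

	/* len is 128 or 256 bytes of raw EDID owned by the ACPI buffer. */
	pr_info("ACPI LCD EDID: %d bytes\n", len);
	return 0;
}
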
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index fc2f26b9b407..c5fef01b3c95 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str)
ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR;
if (!strcmp("video", str))
acpi_video_support |=
- ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
+ ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO;
}
return 1;
}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index e68541f662b9..aa85a98d3a4f 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -57,6 +57,8 @@ config SATA_PMP
This option adds support for SATA Port Multipliers
(the SATA version of an ethernet hub, or SAS expander).
+comment "Controllers with non-SFF native interface"
+
config SATA_AHCI
tristate "AHCI SATA support"
depends on PCI
@@ -73,11 +75,12 @@ config SATA_AHCI_PLATFORM
If unsure, say N.
-config SATA_SIL24
- tristate "Silicon Image 3124/3132 SATA support"
- depends on PCI
+config SATA_FSL
+ tristate "Freescale 3.0Gbps SATA support"
+ depends on FSL_SOC
help
- This option enables support for Silicon Image 3124/3132 Serial ATA.
+ This option enables support for Freescale 3.0Gbps SATA controller.
+ It can be found on MPC837x and MPC8315.
If unsure, say N.
@@ -87,12 +90,11 @@ config SATA_INIC162X
help
This option enables support for Initio 162x Serial ATA.
-config SATA_FSL
- tristate "Freescale 3.0Gbps SATA support"
- depends on FSL_SOC
+config SATA_SIL24
+ tristate "Silicon Image 3124/3132 SATA support"
+ depends on PCI
help
- This option enables support for Freescale 3.0Gbps SATA controller.
- It can be found on MPC837x and MPC8315.
+ This option enables support for Silicon Image 3124/3132 Serial ATA.
If unsure, say N.
@@ -116,15 +118,65 @@ config ATA_SFF
if ATA_SFF
-config SATA_SVW
- tristate "ServerWorks Frodo / Apple K2 SATA support"
+comment "SFF controllers with custom DMA interface"
+
+config PDC_ADMA
+ tristate "Pacific Digital ADMA support"
depends on PCI
help
- This option enables support for Broadcom/Serverworks/Apple K2
- SATA support.
+ This option enables support for Pacific Digital ADMA controllers
+
+ If unsure, say N.
+
+config PATA_MPC52xx
+ tristate "Freescale MPC52xx SoC internal IDE"
+ depends on PPC_MPC52xx && PPC_BESTCOMM
+ select PPC_BESTCOMM_ATA
+ help
+ This option enables support for integrated IDE controller
+ of the Freescale MPC52xx SoC.
+
+ If unsure, say N.
+
+config PATA_OCTEON_CF
+ tristate "OCTEON Boot Bus Compact Flash support"
+ depends on CPU_CAVIUM_OCTEON
+ help
+ This option enables a polled compact flash driver for use with
+ compact flash cards attached to the OCTEON boot bus.
+
+ If unsure, say N.
+
+config SATA_QSTOR
+ tristate "Pacific Digital SATA QStor support"
+ depends on PCI
+ help
+ This option enables support for Pacific Digital Serial ATA QStor.
+
+ If unsure, say N.
+
+config SATA_SX4
+ tristate "Promise SATA SX4 support (Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables support for Promise Serial ATA SX4.
If unsure, say N.
+config ATA_BMDMA
+ bool "ATA BMDMA support"
+ default y
+ help
+ This option adds support for SFF ATA controllers with BMDMA
+ capability. BMDMA stands for bus-master DMA and is the
+ de facto DMA interface for SFF controllers.
+
+ If unsure, say Y.
+
+if ATA_BMDMA
+
+comment "SATA SFF controllers with BMDMA"
+
config ATA_PIIX
tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support"
depends on PCI
@@ -152,22 +204,6 @@ config SATA_NV
If unsure, say N.
-config PDC_ADMA
- tristate "Pacific Digital ADMA support"
- depends on PCI
- help
- This option enables support for Pacific Digital ADMA controllers
-
- If unsure, say N.
-
-config SATA_QSTOR
- tristate "Pacific Digital SATA QStor support"
- depends on PCI
- help
- This option enables support for Pacific Digital Serial ATA QStor.
-
- If unsure, say N.
-
config SATA_PROMISE
tristate "Promise SATA TX2/TX4 support"
depends on PCI
@@ -176,14 +212,6 @@ config SATA_PROMISE
If unsure, say N.
-config SATA_SX4
- tristate "Promise SATA SX4 support (Experimental)"
- depends on PCI && EXPERIMENTAL
- help
- This option enables support for Promise Serial ATA SX4.
-
- If unsure, say N.
-
config SATA_SIL
tristate "Silicon Image SATA support"
depends on PCI
@@ -203,6 +231,15 @@ config SATA_SIS
enable the PATA_SIS driver in the config.
If unsure, say N.
+config SATA_SVW
+ tristate "ServerWorks Frodo / Apple K2 SATA support"
+ depends on PCI
+ help
+ This option enables support for Broadcom/Serverworks/Apple K2
+ SATA support.
+
+ If unsure, say N.
+
config SATA_ULI
tristate "ULi Electronics SATA support"
depends on PCI
@@ -227,14 +264,7 @@ config SATA_VITESSE
If unsure, say N.
-config PATA_ACPI
- tristate "ACPI firmware driver for PATA"
- depends on ATA_ACPI
- help
- This option enables an ACPI method driver which drives
- motherboard PATA controller interfaces through the ACPI
- firmware in the BIOS. This driver can sometimes handle
- otherwise unsupported hardware.
+comment "PATA SFF controllers with BMDMA"
config PATA_ALI
tristate "ALi PATA support"
@@ -262,40 +292,30 @@ config PATA_ARTOP
If unsure, say N.
-config PATA_ATP867X
- tristate "ARTOP/Acard ATP867X PATA support"
+config PATA_ATIIXP
+ tristate "ATI PATA support"
depends on PCI
help
- This option enables support for ARTOP/Acard ATP867X PATA
- controllers.
-
- If unsure, say N.
-
-config PATA_AT32
- tristate "Atmel AVR32 PATA support (Experimental)"
- depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL
- help
- This option enables support for the IDE devices on the
- Atmel AT32AP platform.
+ This option enables support for the ATI ATA interfaces
+ found on the many ATI chipsets.
If unsure, say N.
-config PATA_ATIIXP
- tristate "ATI PATA support"
+config PATA_ATP867X
+ tristate "ARTOP/Acard ATP867X PATA support"
depends on PCI
help
- This option enables support for the ATI ATA interfaces
- found on the many ATI chipsets.
+ This option enables support for ARTOP/Acard ATP867X PATA
+ controllers.
If unsure, say N.
-config PATA_CMD640_PCI
- tristate "CMD640 PCI PATA support (Experimental)"
- depends on PCI && EXPERIMENTAL
+config PATA_BF54X
+ tristate "Blackfin 54x ATAPI support"
+ depends on BF542 || BF548 || BF549
help
- This option enables support for the CMD640 PCI IDE
- interface chip. Only the primary channel is currently
- supported.
+ This option enables support for the built-in ATAPI controller on
+ Blackfin 54x family chips.
If unsure, say N.
@@ -362,15 +382,6 @@ config PATA_EFAR
If unsure, say N.
-config ATA_GENERIC
- tristate "Generic ATA support"
- depends on PCI
- help
- This option enables support for generic BIOS configured
- ATA controllers via the new ATA layer
-
- If unsure, say N.
-
config PATA_HPT366
tristate "HPT 366/368 PATA support"
depends on PCI
@@ -415,12 +426,20 @@ config PATA_HPT3X3_DMA
controllers. Enable with care as there are still some
problems with DMA on this chipset.
-config PATA_ISAPNP
- tristate "ISA Plug and Play PATA support"
- depends on ISAPNP
+config PATA_ICSIDE
+ tristate "Acorn ICS PATA support"
+ depends on ARM && ARCH_ACORN
help
- This option enables support for ISA plug & play ATA
- controllers such as those found on old soundcards.
+ On Acorn systems, say Y here if you wish to use the ICS PATA
+ interface card. This is not required for ICS partition support.
+ If you are unsure, say N to this.
+
+config PATA_IT8213
+ tristate "IT8213 PATA support (Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables support for the ITE 821 PATA
+ controllers via the new ATA layer.
If unsure, say N.
@@ -434,15 +453,6 @@ config PATA_IT821X
If unsure, say N.
-config PATA_IT8213
- tristate "IT8213 PATA support (Experimental)"
- depends on PCI && EXPERIMENTAL
- help
- This option enables support for the ITE 821 PATA
- controllers via the new ATA layer.
-
- If unsure, say N.
-
config PATA_JMICRON
tristate "JMicron PATA support"
depends on PCI
@@ -452,23 +462,14 @@ config PATA_JMICRON
If unsure, say N.
-config PATA_LEGACY
- tristate "Legacy ISA PATA support (Experimental)"
- depends on (ISA || PCI) && EXPERIMENTAL
- help
- This option enables support for ISA/VLB/PCI bus legacy PATA
- ports and allows them to be accessed via the new ATA layer.
-
- If unsure, say N.
-
-config PATA_TRIFLEX
- tristate "Compaq Triflex PATA support"
- depends on PCI
+config PATA_MACIO
+ tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE"
+ depends on PPC_PMAC
help
- Enable support for the Compaq 'Triflex' IDE controller as found
- on many Compaq Pentium-Pro systems, via the new ATA layer.
-
- If unsure, say N.
+ Most IDE capable PowerMacs have IDE busses driven by a variant
+ of this controller which is part of the Apple chipset used on
+ most PowerMac models. Some models have multiple busses using
+ different chipsets, though generally, MacIO is one of them.
config PATA_MARVELL
tristate "Marvell PATA support via legacy mode"
@@ -481,32 +482,6 @@ config PATA_MARVELL
If unsure, say N.
-config PATA_MPC52xx
- tristate "Freescale MPC52xx SoC internal IDE"
- depends on PPC_MPC52xx && PPC_BESTCOMM
- select PPC_BESTCOMM_ATA
- help
- This option enables support for integrated IDE controller
- of the Freescale MPC52xx SoC.
-
- If unsure, say N.
-
-config PATA_MPIIX
- tristate "Intel PATA MPIIX support"
- depends on PCI
- help
- This option enables support for MPIIX PATA support.
-
- If unsure, say N.
-
-config PATA_OLDPIIX
- tristate "Intel PATA old PIIX support"
- depends on PCI
- help
- This option enables support for early PIIX PATA support.
-
- If unsure, say N.
-
config PATA_NETCELL
tristate "NETCELL Revolution RAID support"
depends on PCI
@@ -525,15 +500,6 @@ config PATA_NINJA32
If unsure, say N.
-config PATA_NS87410
- tristate "Nat Semi NS87410 PATA support"
- depends on PCI
- help
- This option enables support for the National Semiconductor
- NS87410 PCI-IDE controller.
-
- If unsure, say N.
-
config PATA_NS87415
tristate "Nat Semi NS87415 PATA support"
depends on PCI
@@ -543,12 +509,11 @@ config PATA_NS87415
If unsure, say N.
-config PATA_OPTI
- tristate "OPTI621/6215 PATA support (Very Experimental)"
- depends on PCI && EXPERIMENTAL
+config PATA_OLDPIIX
+ tristate "Intel PATA old PIIX support"
+ depends on PCI
help
- This option enables full PIO support for the early Opti ATA
- controllers found on some old motherboards.
+ This option enables support for early PIIX PATA support.
If unsure, say N.
@@ -562,24 +527,6 @@ config PATA_OPTIDMA
If unsure, say N.
-config PATA_PALMLD
- tristate "Palm LifeDrive PATA support"
- depends on MACH_PALMLD
- help
- This option enables support for Palm LifeDrive's internal ATA
- port via the new ATA layer.
-
- If unsure, say N.
-
-config PATA_PCMCIA
- tristate "PCMCIA PATA support"
- depends on PCMCIA
- help
- This option enables support for PCMCIA ATA interfaces, including
- compact flash card adapters via the new ATA layer.
-
- If unsure, say N.
-
config PATA_PDC2027X
tristate "Promise PATA 2027x support"
depends on PCI
@@ -597,12 +544,6 @@ config PATA_PDC_OLD
If unsure, say N.
-config PATA_QDI
- tristate "QDI VLB PATA support"
- depends on ISA
- help
- Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
-
config PATA_RADISYS
tristate "RADISYS 82600 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
@@ -612,15 +553,6 @@ config PATA_RADISYS
If unsure, say N.
-config PATA_RB532
- tristate "RouterBoard 532 PATA CompactFlash support"
- depends on MIKROTIK_RB532
- help
- This option enables support for the RouterBoard 532
- PATA CompactFlash controller.
-
- If unsure, say N.
-
config PATA_RDC
tristate "RDC PATA support"
depends on PCI
@@ -631,21 +563,30 @@ config PATA_RDC
If unsure, say N.
-config PATA_RZ1000
- tristate "PC Tech RZ1000 PATA support"
+config PATA_SC1200
+ tristate "SC1200 PATA support"
depends on PCI
help
- This option enables basic support for the PC Tech RZ1000/1
- PATA controllers via the new ATA layer
+ This option enables support for the NatSemi/AMD SC1200 SoC
+ companion chip used with the Geode processor family.
If unsure, say N.
-config PATA_SC1200
- tristate "SC1200 PATA support"
+config PATA_SCC
+ tristate "Toshiba's Cell Reference Set IDE support"
+ depends on PCI && PPC_CELLEB
+ help
+ This option enables support for the built-in IDE controller on
+ Toshiba Cell Reference Board.
+
+ If unsure, say N.
+
+config PATA_SCH
+ tristate "Intel SCH PATA support"
depends on PCI
help
- This option enables support for the NatSemi/AMD SC1200 SoC
- companion chip used with the Geode processor family.
+ This option enables support for Intel SCH PATA on the Intel
+ SCH (US15W, US15L, UL11L) series host controllers.
If unsure, say N.
@@ -683,6 +624,15 @@ config PATA_TOSHIBA
If unsure, say N.
+config PATA_TRIFLEX
+ tristate "Compaq Triflex PATA support"
+ depends on PCI
+ help
+ Enable support for the Compaq 'Triflex' IDE controller as found
+ on many Compaq Pentium-Pro systems, via the new ATA layer.
+
+ If unsure, say N.
+
config PATA_VIA
tristate "VIA PATA support"
depends on PCI
@@ -701,12 +651,99 @@ config PATA_WINBOND
If unsure, say N.
-config PATA_WINBOND_VLB
- tristate "Winbond W83759A VLB PATA support (Experimental)"
- depends on ISA && EXPERIMENTAL
+endif # ATA_BMDMA
+
+comment "PIO-only SFF controllers"
+
+config PATA_AT32
+ tristate "Atmel AVR32 PATA support (Experimental)"
+ depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL
help
- Support for the Winbond W83759A controller on Vesa Local Bus
- systems.
+ This option enables support for the IDE devices on the
+ Atmel AT32AP platform.
+
+ If unsure, say N.
+
+config PATA_AT91
+ tristate "PATA support for AT91SAM9260"
+ depends on ARM && ARCH_AT91
+ help
+ This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
+
+ If unsure, say N.
+
+config PATA_CMD640_PCI
+ tristate "CMD640 PCI PATA support (Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables support for the CMD640 PCI IDE
+ interface chip. Only the primary channel is currently
+ supported.
+
+ If unsure, say N.
+
+config PATA_ISAPNP
+ tristate "ISA Plug and Play PATA support"
+ depends on ISAPNP
+ help
+ This option enables support for ISA plug & play ATA
+ controllers such as those found on old soundcards.
+
+ If unsure, say N.
+
+config PATA_IXP4XX_CF
+ tristate "IXP4XX Compact Flash support"
+ depends on ARCH_IXP4XX
+ help
+ This option enables support for a Compact Flash connected on
+ the ixp4xx expansion bus. This driver had been written for
+ Loft/Avila boards in mind but can work with others.
+
+ If unsure, say N.
+
+config PATA_MPIIX
+ tristate "Intel PATA MPIIX support"
+ depends on PCI
+ help
+ This option enables support for MPIIX PATA support.
+
+ If unsure, say N.
+
+config PATA_NS87410
+ tristate "Nat Semi NS87410 PATA support"
+ depends on PCI
+ help
+ This option enables support for the National Semiconductor
+ NS87410 PCI-IDE controller.
+
+ If unsure, say N.
+
+config PATA_OPTI
+ tristate "OPTI621/6215 PATA support (Very Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables full PIO support for the early Opti ATA
+ controllers found on some old motherboards.
+
+ If unsure, say N.
+
+config PATA_PALMLD
+ tristate "Palm LifeDrive PATA support"
+ depends on MACH_PALMLD
+ help
+ This option enables support for Palm LifeDrive's internal ATA
+ port via the new ATA layer.
+
+ If unsure, say N.
+
+config PATA_PCMCIA
+ tristate "PCMCIA PATA support"
+ depends on PCMCIA
+ help
+ This option enables support for PCMCIA ATA interfaces, including
+ compact flash card adapters via the new ATA layer.
+
+ If unsure, say N.
config HAVE_PATA_PLATFORM
bool
@@ -725,14 +762,6 @@ config PATA_PLATFORM
If unsure, say N.
-config PATA_AT91
- tristate "PATA support for AT91SAM9260"
- depends on ARM && ARCH_AT91
- help
- This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
-
- If unsure, say N.
-
config PATA_OF_PLATFORM
tristate "OpenFirmware platform device PATA support"
depends on PATA_PLATFORM && PPC_OF
@@ -743,69 +772,65 @@ config PATA_OF_PLATFORM
If unsure, say N.
-config PATA_ICSIDE
- tristate "Acorn ICS PATA support"
- depends on ARM && ARCH_ACORN
+config PATA_QDI
+ tristate "QDI VLB PATA support"
+ depends on ISA
help
- On Acorn systems, say Y here if you wish to use the ICS PATA
- interface card. This is not required for ICS partition support.
- If you are unsure, say N to this.
+ Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
-config PATA_IXP4XX_CF
- tristate "IXP4XX Compact Flash support"
- depends on ARCH_IXP4XX
+config PATA_RB532
+ tristate "RouterBoard 532 PATA CompactFlash support"
+ depends on MIKROTIK_RB532
help
- This option enables support for a Compact Flash connected on
- the ixp4xx expansion bus. This driver had been written for
- Loft/Avila boards in mind but can work with others.
+ This option enables support for the RouterBoard 532
+ PATA CompactFlash controller.
If unsure, say N.
-config PATA_OCTEON_CF
- tristate "OCTEON Boot Bus Compact Flash support"
- depends on CPU_CAVIUM_OCTEON
+config PATA_RZ1000
+ tristate "PC Tech RZ1000 PATA support"
+ depends on PCI
help
- This option enables a polled compact flash driver for use with
- compact flash cards attached to the OCTEON boot bus.
+ This option enables basic support for the PC Tech RZ1000/1
+ PATA controllers via the new ATA layer
If unsure, say N.
-config PATA_SCC
- tristate "Toshiba's Cell Reference Set IDE support"
- depends on PCI && PPC_CELLEB
+config PATA_WINBOND_VLB
+ tristate "Winbond W83759A VLB PATA support (Experimental)"
+ depends on ISA && EXPERIMENTAL
help
- This option enables support for the built-in IDE controller on
- Toshiba Cell Reference Board.
+ Support for the Winbond W83759A controller on Vesa Local Bus
+ systems.
- If unsure, say N.
+comment "Generic fallback / legacy drivers"
-config PATA_SCH
- tristate "Intel SCH PATA support"
- depends on PCI
+config PATA_ACPI
+ tristate "ACPI firmware driver for PATA"
+ depends on ATA_ACPI && ATA_BMDMA
help
- This option enables support for Intel SCH PATA on the Intel
- SCH (US15W, US15L, UL11L) series host controllers.
-
- If unsure, say N.
+ This option enables an ACPI method driver which drives
+ motherboard PATA controller interfaces through the ACPI
+ firmware in the BIOS. This driver can sometimes handle
+ otherwise unsupported hardware.
-config PATA_BF54X
- tristate "Blackfin 54x ATAPI support"
- depends on BF542 || BF548 || BF549
+config ATA_GENERIC
+ tristate "Generic ATA support"
+ depends on PCI && ATA_BMDMA
help
- This option enables support for the built-in ATAPI controller on
- Blackfin 54x family chips.
+ This option enables support for generic BIOS configured
+ ATA controllers via the new ATA layer
If unsure, say N.
-config PATA_MACIO
- tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE"
- depends on PPC_PMAC
+config PATA_LEGACY
+ tristate "Legacy ISA PATA support (Experimental)"
+ depends on (ISA || PCI) && EXPERIMENTAL
help
- Most IDE capable PowerMacs have IDE busses driven by a variant
- of this controller which is part of the Apple chipset used on
- most PowerMac models. Some models have multiple busses using
- different chipsets, though generally, MacIO is one of them.
+ This option enables support for ISA/VLB/PCI bus legacy PATA
+ ports and allows them to be accessed via the new ATA layer.
+ If unsure, say N.
endif # ATA_SFF
endif # ATA
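
Besides regrouping the menu, the new ATA_BMDMA symbol is what lets PIO-only kernels drop bus-master DMA code; the libata-sff.c hunks further down guard their DMA paths the same way. A small sketch of the pattern in driver code (the function name is illustrative only):

#include <linux/libata.h>

/* Sketch: DMA-specific work is fenced off so a CONFIG_ATA_BMDMA=n build
 * never references the bmdma port operations. */
static void example_start_transfer(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	ap->hsm_task_state = HSM_ST_LAST;

#ifdef CONFIG_ATA_BMDMA
	/* Only BMDMA-capable ports ever queue a DMA protocol here. */
	if (ata_is_dma(qc->tf.protocol))
		ap->ops->bmdma_start(qc);
#endif
}
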
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index d0a93c4ad3ec..7ef89d73df63 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -1,33 +1,39 @@
obj-$(CONFIG_ATA) += libata.o
+# non-SFF interface
obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
-obj-$(CONFIG_SATA_SVW) += sata_svw.o
+obj-$(CONFIG_SATA_FSL) += sata_fsl.o
+obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
+obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
+
+# SFF w/ custom DMA
+obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
+obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
+obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
+obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
+obj-$(CONFIG_SATA_SX4) += sata_sx4.o
+
+# SFF SATA w/ BMDMA
obj-$(CONFIG_ATA_PIIX) += ata_piix.o
+obj-$(CONFIG_SATA_MV) += sata_mv.o
+obj-$(CONFIG_SATA_NV) += sata_nv.o
obj-$(CONFIG_SATA_PROMISE) += sata_promise.o
-obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
obj-$(CONFIG_SATA_SIL) += sata_sil.o
-obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
-obj-$(CONFIG_SATA_VIA) += sata_via.o
-obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o
obj-$(CONFIG_SATA_SIS) += sata_sis.o
-obj-$(CONFIG_SATA_SX4) += sata_sx4.o
-obj-$(CONFIG_SATA_NV) += sata_nv.o
+obj-$(CONFIG_SATA_SVW) += sata_svw.o
obj-$(CONFIG_SATA_ULI) += sata_uli.o
-obj-$(CONFIG_SATA_MV) += sata_mv.o
-obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
-obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
-obj-$(CONFIG_SATA_FSL) += sata_fsl.o
-obj-$(CONFIG_PATA_MACIO) += pata_macio.o
+obj-$(CONFIG_SATA_VIA) += sata_via.o
+obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o
+# SFF PATA w/ BMDMA
obj-$(CONFIG_PATA_ALI) += pata_ali.o
obj-$(CONFIG_PATA_AMD) += pata_amd.o
obj-$(CONFIG_PATA_ARTOP) += pata_artop.o
-obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o
-obj-$(CONFIG_PATA_AT32) += pata_at32.o
obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o
-obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o
+obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o
+obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o
obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o
obj-$(CONFIG_PATA_CS5520) += pata_cs5520.o
obj-$(CONFIG_PATA_CS5530) += pata_cs5530.o
@@ -39,47 +45,50 @@ obj-$(CONFIG_PATA_HPT366) += pata_hpt366.o
obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o
obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o
-obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
-obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
+obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
obj-$(CONFIG_PATA_IT8213) += pata_it8213.o
+obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
+obj-$(CONFIG_PATA_MACIO) += pata_macio.o
+obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o
obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o
-obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o
-obj-$(CONFIG_PATA_OPTI) += pata_opti.o
-obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
-obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
-obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
-obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o
-obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o
-obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
+obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o
obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
-obj-$(CONFIG_PATA_QDI) += pata_qdi.o
obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o
-obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
obj-$(CONFIG_PATA_RDC) += pata_rdc.o
-obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
+obj-$(CONFIG_PATA_SCC) += pata_scc.o
+obj-$(CONFIG_PATA_SCH) += pata_sch.o
obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
obj-$(CONFIG_PATA_SIL680) += pata_sil680.o
+obj-$(CONFIG_PATA_SIS) += pata_sis.o
obj-$(CONFIG_PATA_TOSHIBA) += pata_piccolo.o
+obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o
obj-$(CONFIG_PATA_VIA) += pata_via.o
obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o
-obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
-obj-$(CONFIG_PATA_SIS) += pata_sis.o
-obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o
+
+# SFF PIO only
+obj-$(CONFIG_PATA_AT32) += pata_at32.o
+obj-$(CONFIG_PATA_AT91) += pata_at91.o
+obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o
+obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o
-obj-$(CONFIG_PATA_SCC) += pata_scc.o
-obj-$(CONFIG_PATA_SCH) += pata_sch.o
-obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o
-obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
+obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
+obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
+obj-$(CONFIG_PATA_OPTI) += pata_opti.o
+obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
+obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o
obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
-obj-$(CONFIG_PATA_AT91) += pata_at91.o
obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o
-obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
+obj-$(CONFIG_PATA_QDI) += pata_qdi.o
+obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
+obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
+obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
+
# Should be last but two libata driver
obj-$(CONFIG_PATA_ACPI) += pata_acpi.o
# Should be last but one libata driver
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 33fb614f9784..573158a9668d 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -155,7 +155,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
return rc;
pcim_pin_device(dev);
}
- return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, NULL, 0);
}
static struct pci_device_id ata_generic[] = {
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ec52fc618763..7409f98d2ae6 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1589,7 +1589,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
hpriv->map = piix_init_sata_map(pdev, port_info,
piix_map_db_table[ent->driver_data]);
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
host->private_data = hpriv;
@@ -1626,7 +1626,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
host->flags |= ATA_HOST_PARALLEL_SCAN;
pci_set_master(pdev);
- return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht);
+ return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &piix_sht);
}
static void piix_remove_one(struct pci_dev *pdev)
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 1984a6e89e84..261f86d102e8 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -541,29 +541,11 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
return -EINVAL;
}
-static int ahci_is_device_present(void __iomem *port_mmio)
-{
- u8 status = readl(port_mmio + PORT_TFDATA) & 0xff;
-
- /* Make sure PxTFD.STS.BSY and PxTFD.STS.DRQ are 0 */
- if (status & (ATA_BUSY | ATA_DRQ))
- return 0;
-
- /* Make sure PxSSTS.DET is 3h */
- status = readl(port_mmio + PORT_SCR_STAT) & 0xf;
- if (status != 3)
- return 0;
- return 1;
-}
-
void ahci_start_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
- if (!ahci_is_device_present(port_mmio))
- return;
-
/* start DMA */
tmp = readl(port_mmio + PORT_CMD);
tmp |= PORT_CMD_START;
@@ -1892,6 +1874,9 @@ static void ahci_error_handler(struct ata_port *ap)
}
sata_pmp_error_handler(ap);
+
+ if (!ata_dev_enabled(ap->link.device))
+ ahci_stop_engine(ap);
}
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c47373f01f89..ddf8e4862787 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -160,6 +160,10 @@ int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
+static int atapi_an;
+module_param(atapi_an, int, 0444);
+MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
+
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
@@ -2122,6 +2126,14 @@ retry:
goto err_out;
}
+ if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
+ ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, "
+ "class=%d may_fallback=%d tried_spinup=%d\n",
+ class, may_fallback, tried_spinup);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
+ 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
+ }
+
/* Falling back doesn't make sense if ID data was read
* successfully at least once.
*/
@@ -2510,7 +2522,8 @@ int ata_dev_configure(struct ata_device *dev)
* to enable ATAPI AN to discern between PHY status
* changed notifications and ATAPI ANs.
*/
- if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
+ if (atapi_an &&
+ (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
(!sata_pmp_attached(ap) ||
sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
unsigned int err_mask;
@@ -4106,9 +4119,8 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
ata_dev_printk(dev, KERN_WARNING,
"new n_sectors matches native, probably "
- "late HPA unlock, continuing\n");
- /* keep using the old n_sectors */
- dev->n_sectors = n_sectors;
+ "late HPA unlock, n_sectors updated\n");
+ /* use the larger n_sectors */
return 0;
}
@@ -6372,6 +6384,7 @@ static int __init ata_parse_force_one(char **cur,
{ "3.0Gbps", .spd_limit = 2 },
{ "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
{ "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
+ { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
{ "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
{ "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
{ "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
@@ -6655,6 +6668,7 @@ EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_link_next);
EXPORT_SYMBOL_GPL(ata_dev_next);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
+EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index cfa9dd3d7253..a54273d2c3c6 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -415,6 +415,35 @@ int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
}
/**
+ * ata_scsi_unlock_native_capacity - unlock native capacity
+ * @sdev: SCSI device to adjust device capacity for
+ *
+ * This function is called if a partition on @sdev extends beyond
+ * the end of the device. It requests EH to unlock HPA.
+ *
+ * LOCKING:
+ * Defined by the SCSI layer. Might sleep.
+ */
+void ata_scsi_unlock_native_capacity(struct scsi_device *sdev)
+{
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct ata_device *dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (dev && dev->n_sectors < dev->n_native_sectors) {
+ dev->flags |= ATA_DFLAG_UNLOCK_HPA;
+ dev->link->eh_info.action |= ATA_EH_RESET;
+ ata_port_schedule_eh(ap);
+ }
+
+ spin_unlock_irqrestore(ap->lock, flags);
+ ata_port_wait_eh(ap);
+}
+
+/**
* ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
* @ap: target port
* @sdev: SCSI device to get identify data for
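
ata_scsi_unlock_native_capacity() above is meant to be reached from the SCSI disk driver when a partition runs past the reported capacity. A wiring sketch, assuming an .unlock_native_capacity hook in scsi_host_template; the template name is a placeholder and only the relevant fields are shown:

#include <scsi/scsi_host.h>
#include <linux/libata.h>

static struct scsi_host_template example_sht = {
	.module			= THIS_MODULE,
	.name			= "example",
	/* sd calls this when a partition extends past the device size;
	 * the handler schedules EH with ATA_DFLAG_UNLOCK_HPA set. */
	.unlock_native_capacity	= ata_scsi_unlock_native_capacity,
};
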
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 19ddf924944f..efa4a18cfb9d 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -63,7 +63,6 @@ const struct ata_port_operations ata_sff_port_ops = {
.sff_tf_read = ata_sff_tf_read,
.sff_exec_command = ata_sff_exec_command,
.sff_data_xfer = ata_sff_data_xfer,
- .sff_irq_clear = ata_sff_irq_clear,
.sff_drain_fifo = ata_sff_drain_fifo,
.lost_interrupt = ata_sff_lost_interrupt,
@@ -395,33 +394,12 @@ void ata_sff_irq_on(struct ata_port *ap)
ata_sff_set_devctl(ap, ap->ctl);
ata_wait_idle(ap);
- ap->ops->sff_irq_clear(ap);
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_irq_on);
/**
- * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt.
- * @ap: Port associated with this ATA transaction.
- *
- * Clear interrupt and error flags in DMA status register.
- *
- * May be used as the irq_clear() entry in ata_port_operations.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_sff_irq_clear(struct ata_port *ap)
-{
- void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
- if (!mmio)
- return;
-
- iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
-}
-EXPORT_SYMBOL_GPL(ata_sff_irq_clear);
-
-/**
* ata_sff_tf_load - send taskfile registers to host controller
* @ap: Port to which output is sent
* @tf: ATA taskfile register set
@@ -820,11 +798,15 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
case ATAPI_PROT_NODATA:
ap->hsm_task_state = HSM_ST_LAST;
break;
+#ifdef CONFIG_ATA_BMDMA
case ATAPI_PROT_DMA:
ap->hsm_task_state = HSM_ST_LAST;
/* initiate bmdma */
ap->ops->bmdma_start(qc);
break;
+#endif /* CONFIG_ATA_BMDMA */
+ default:
+ BUG();
}
}
@@ -1491,27 +1473,27 @@ bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
}
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
-/**
- * ata_sff_host_intr - Handle host interrupt for given (port, task)
- * @ap: Port on which interrupt arrived (possibly...)
- * @qc: Taskfile currently active in engine
- *
- * Handle host interrupt for given queued command. Currently,
- * only DMA interrupts are handled. All other commands are
- * handled via polling with interrupts disabled (nIEN bit).
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- * RETURNS:
- * One if interrupt was handled, zero if not (shared irq).
- */
-unsigned int ata_sff_host_intr(struct ata_port *ap,
- struct ata_queued_cmd *qc)
+static unsigned int ata_sff_idle_irq(struct ata_port *ap)
{
- struct ata_eh_info *ehi = &ap->link.eh_info;
- u8 status, host_stat = 0;
- bool bmdma_stopped = false;
+ ap->stats.idle_irq++;
+
+#ifdef ATA_IRQ_TRAP
+ if ((ap->stats.idle_irq % 1000) == 0) {
+ ap->ops->sff_check_status(ap);
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
+ ata_port_printk(ap, KERN_WARNING, "irq trap\n");
+ return 1;
+ }
+#endif
+ return 0; /* irq not handled */
+}
+
+static unsigned int __ata_sff_port_intr(struct ata_port *ap,
+ struct ata_queued_cmd *qc,
+ bool hsmv_on_idle)
+{
+ u8 status;
VPRINTK("ata%u: protocol %d task_state %d\n",
ap->print_id, qc->tf.protocol, ap->hsm_task_state);
@@ -1528,90 +1510,56 @@ unsigned int ata_sff_host_intr(struct ata_port *ap,
* need to check ata_is_atapi(qc->tf.protocol) again.
*/
if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
- goto idle_irq;
- break;
- case HSM_ST_LAST:
- if (qc->tf.protocol == ATA_PROT_DMA ||
- qc->tf.protocol == ATAPI_PROT_DMA) {
- /* check status of DMA engine */
- host_stat = ap->ops->bmdma_status(ap);
- VPRINTK("ata%u: host_stat 0x%X\n",
- ap->print_id, host_stat);
-
- /* if it's not our irq... */
- if (!(host_stat & ATA_DMA_INTR))
- goto idle_irq;
-
- /* before we do anything else, clear DMA-Start bit */
- ap->ops->bmdma_stop(qc);
- bmdma_stopped = true;
-
- if (unlikely(host_stat & ATA_DMA_ERR)) {
- /* error when transfering data to/from memory */
- qc->err_mask |= AC_ERR_HOST_BUS;
- ap->hsm_task_state = HSM_ST_ERR;
- }
- }
+ return ata_sff_idle_irq(ap);
break;
case HSM_ST:
+ case HSM_ST_LAST:
break;
default:
- goto idle_irq;
+ return ata_sff_idle_irq(ap);
}
-
/* check main status, clearing INTRQ if needed */
status = ata_sff_irq_status(ap);
if (status & ATA_BUSY) {
- if (bmdma_stopped) {
+ if (hsmv_on_idle) {
/* BMDMA engine is already stopped, we're screwed */
qc->err_mask |= AC_ERR_HSM;
ap->hsm_task_state = HSM_ST_ERR;
} else
- goto idle_irq;
+ return ata_sff_idle_irq(ap);
}
/* clear irq events */
- ap->ops->sff_irq_clear(ap);
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
ata_sff_hsm_move(ap, qc, status, 0);
- if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
- qc->tf.protocol == ATAPI_PROT_DMA))
- ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
-
return 1; /* irq handled */
-
-idle_irq:
- ap->stats.idle_irq++;
-
-#ifdef ATA_IRQ_TRAP
- if ((ap->stats.idle_irq % 1000) == 0) {
- ap->ops->sff_check_status(ap);
- ap->ops->sff_irq_clear(ap);
- ata_port_printk(ap, KERN_WARNING, "irq trap\n");
- return 1;
- }
-#endif
- return 0; /* irq not handled */
}
-EXPORT_SYMBOL_GPL(ata_sff_host_intr);
/**
- * ata_sff_interrupt - Default ATA host interrupt handler
- * @irq: irq line (unused)
- * @dev_instance: pointer to our ata_host information structure
+ * ata_sff_port_intr - Handle SFF port interrupt
+ * @ap: Port on which interrupt arrived (possibly...)
+ * @qc: Taskfile currently active in engine
*
- * Default interrupt handler for PCI IDE devices. Calls
- * ata_sff_host_intr() for each port that is not disabled.
+ * Handle port interrupt for given queued command.
*
* LOCKING:
- * Obtains host lock during operation.
+ * spin_lock_irqsave(host lock)
*
* RETURNS:
- * IRQ_NONE or IRQ_HANDLED.
+ * One if interrupt was handled, zero if not (shared irq).
*/
-irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
+unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+ return __ata_sff_port_intr(ap, qc, false);
+}
+EXPORT_SYMBOL_GPL(ata_sff_port_intr);
+
+static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
+ unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
{
struct ata_host *host = dev_instance;
bool retried = false;
@@ -1631,7 +1579,7 @@ retry:
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc) {
if (!(qc->tf.flags & ATA_TFLAG_POLLING))
- handled |= ata_sff_host_intr(ap, qc);
+ handled |= port_intr(ap, qc);
else
polling |= 1 << i;
} else
@@ -1658,7 +1606,8 @@ retry:
if (idle & (1 << i)) {
ap->ops->sff_check_status(ap);
- ap->ops->sff_irq_clear(ap);
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
} else {
/* clear INTRQ and check if BUSY cleared */
if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
@@ -1680,6 +1629,25 @@ retry:
return IRQ_RETVAL(handled);
}
+
+/**
+ * ata_sff_interrupt - Default SFF ATA host interrupt handler
+ * @irq: irq line (unused)
+ * @dev_instance: pointer to our ata_host information structure
+ *
+ * Default interrupt handler for PCI IDE devices. Calls
+ * ata_sff_port_intr() for each port that is not disabled.
+ *
+ * LOCKING:
+ * Obtains host lock during operation.
+ *
+ * RETURNS:
+ * IRQ_NONE or IRQ_HANDLED.
+ */
+irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
+{
+ return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
+}
EXPORT_SYMBOL_GPL(ata_sff_interrupt);
/**
@@ -1717,7 +1685,7 @@ void ata_sff_lost_interrupt(struct ata_port *ap)
status);
/* Run the host interrupt logic as if the interrupt had not been
lost */
- ata_sff_host_intr(ap, qc);
+ ata_sff_port_intr(ap, qc);
}
EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
@@ -1744,7 +1712,8 @@ void ata_sff_freeze(struct ata_port *ap)
*/
ap->ops->sff_check_status(ap);
- ap->ops->sff_irq_clear(ap);
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_freeze);
@@ -1761,7 +1730,8 @@ void ata_sff_thaw(struct ata_port *ap)
{
/* clear & re-enable interrupts */
ap->ops->sff_check_status(ap);
- ap->ops->sff_irq_clear(ap);
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
ata_sff_irq_on(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_thaw);
@@ -2349,13 +2319,13 @@ int ata_pci_sff_init_host(struct ata_host *host)
EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
/**
- * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host
+ * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
* @pdev: target PCI device
* @ppi: array of port_info, must be enough for two ports
* @r_host: out argument for the initialized ATA host
*
- * Helper to allocate ATA host for @pdev, acquire all native PCI
- * resources and initialize it accordingly in one go.
+ * Helper to allocate PIO-only SFF ATA host for @pdev, acquire
+ * all PCI resources and initialize it accordingly in one go.
*
* LOCKING:
* Inherited from calling layer (may sleep).
@@ -2385,9 +2355,6 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev,
if (rc)
goto err_out;
- /* init DMA related stuff */
- ata_pci_bmdma_init(host);
-
devres_remove_group(&pdev->dev, NULL);
*r_host = host;
return 0;
@@ -2492,8 +2459,21 @@ out:
}
EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
+static const struct ata_port_info *ata_sff_find_valid_pi(
+ const struct ata_port_info * const *ppi)
+{
+ int i;
+
+ /* look up the first valid port_info */
+ for (i = 0; i < 2 && ppi[i]; i++)
+ if (ppi[i]->port_ops != &ata_dummy_port_ops)
+ return ppi[i];
+
+ return NULL;
+}
+
/**
- * ata_pci_sff_init_one - Initialize/register PCI IDE host controller
+ * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
* @pdev: Controller to be initialized
* @ppi: array of port_info, must be enough for two ports
* @sht: scsi_host_template to use when registering the host
@@ -2502,11 +2482,7 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
*
* This is a helper function which can be called from a driver's
* xxx_init_one() probe function if the hardware uses traditional
- * IDE taskfile registers.
- *
- * This function calls pci_enable_device(), reserves its register
- * regions, sets the dma mask, enables bus master mode, and calls
- * ata_device_add()
+ * IDE taskfile registers and is PIO only.
*
* ASSUMPTION:
* Nobody makes a single channel controller that appears solely as
@@ -2523,20 +2499,13 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
struct scsi_host_template *sht, void *host_priv, int hflag)
{
struct device *dev = &pdev->dev;
- const struct ata_port_info *pi = NULL;
+ const struct ata_port_info *pi;
struct ata_host *host = NULL;
- int i, rc;
+ int rc;
DPRINTK("ENTER\n");
- /* look up the first valid port_info */
- for (i = 0; i < 2 && ppi[i]; i++) {
- if (ppi[i]->port_ops != &ata_dummy_port_ops) {
- pi = ppi[i];
- break;
- }
- }
-
+ pi = ata_sff_find_valid_pi(ppi);
if (!pi) {
dev_printk(KERN_ERR, &pdev->dev,
"no valid port_info specified\n");
@@ -2557,7 +2526,6 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
host->private_data = host_priv;
host->flags |= hflag;
- pci_set_master(pdev);
rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
out:
if (rc == 0)
@@ -2571,6 +2539,12 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
#endif /* CONFIG_PCI */
+/*
+ * BMDMA support
+ */
+
+#ifdef CONFIG_ATA_BMDMA
+
const struct ata_port_operations ata_bmdma_port_ops = {
.inherits = &ata_sff_port_ops,
@@ -2580,6 +2554,7 @@ const struct ata_port_operations ata_bmdma_port_ops = {
.qc_prep = ata_bmdma_qc_prep,
.qc_issue = ata_bmdma_qc_issue,
+ .sff_irq_clear = ata_bmdma_irq_clear,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
@@ -2804,6 +2779,75 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
/**
+ * ata_bmdma_port_intr - Handle BMDMA port interrupt
+ * @ap: Port on which interrupt arrived (possibly...)
+ * @qc: Taskfile currently active in engine
+ *
+ * Handle port interrupt for given queued command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * One if interrupt was handled, zero if not (shared irq).
+ */
+unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ u8 host_stat = 0;
+ bool bmdma_stopped = false;
+ unsigned int handled;
+
+ if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
+ /* check status of DMA engine */
+ host_stat = ap->ops->bmdma_status(ap);
+ VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
+
+ /* if it's not our irq... */
+ if (!(host_stat & ATA_DMA_INTR))
+ return ata_sff_idle_irq(ap);
+
+ /* before we do anything else, clear DMA-Start bit */
+ ap->ops->bmdma_stop(qc);
+ bmdma_stopped = true;
+
+ if (unlikely(host_stat & ATA_DMA_ERR)) {
+ /* error when transferring data to/from memory */
+ qc->err_mask |= AC_ERR_HOST_BUS;
+ ap->hsm_task_state = HSM_ST_ERR;
+ }
+ }
+
+ handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
+
+ if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
+ ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
+
+ return handled;
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
+
+/**
+ * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
+ * @irq: irq line (unused)
+ * @dev_instance: pointer to our ata_host information structure
+ *
+ * Default interrupt handler for PCI IDE devices. Calls
+ * ata_bmdma_port_intr() for each port that is not disabled.
+ *
+ * LOCKING:
+ * Obtains host lock during operation.
+ *
+ * RETURNS:
+ * IRQ_NONE or IRQ_HANDLED.
+ */
+irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
+{
+ return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
+
+/**
* ata_bmdma_error_handler - Stock error handler for BMDMA controller
* @ap: port to handle error for
*
@@ -2848,7 +2892,8 @@ void ata_bmdma_error_handler(struct ata_port *ap)
/* if we're gonna thaw, make sure IRQ is clear */
if (thaw) {
ap->ops->sff_check_status(ap);
- ap->ops->sff_irq_clear(ap);
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
}
}
@@ -2882,6 +2927,28 @@ void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
/**
+ * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
+ * @ap: Port associated with this ATA transaction.
+ *
+ * Clear interrupt and error flags in DMA status register.
+ *
+ * May be used as the irq_clear() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_irq_clear(struct ata_port *ap)
+{
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ if (!mmio)
+ return;
+
+ iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
+
+/**
* ata_bmdma_setup - Set up PCI IDE BMDMA transaction
* @qc: Info associated with this ATA transaction.
*
@@ -3137,7 +3204,100 @@ void ata_pci_bmdma_init(struct ata_host *host)
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
+/**
+ * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
+ * @pdev: target PCI device
+ * @ppi: array of port_info, must be enough for two ports
+ * @r_host: out argument for the initialized ATA host
+ *
+ * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
+ * resources and initialize it accordingly in one go.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
+ const struct ata_port_info * const * ppi,
+ struct ata_host **r_host)
+{
+ int rc;
+
+ rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
+ if (rc)
+ return rc;
+
+ ata_pci_bmdma_init(*r_host);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
+
+/**
+ * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
+ * @pdev: Controller to be initialized
+ * @ppi: array of port_info, must be enough for two ports
+ * @sht: scsi_host_template to use when registering the host
+ * @host_priv: host private_data
+ * @hflags: host flags
+ *
+ * This function is similar to ata_pci_sff_init_one() but also
+ * takes care of BMDMA initialization.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, negative errno-based value on error.
+ */
+int ata_pci_bmdma_init_one(struct pci_dev *pdev,
+ const struct ata_port_info * const * ppi,
+ struct scsi_host_template *sht, void *host_priv,
+ int hflags)
+{
+ struct device *dev = &pdev->dev;
+ const struct ata_port_info *pi;
+ struct ata_host *host = NULL;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ pi = ata_sff_find_valid_pi(ppi);
+ if (!pi) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "no valid port_info specified\n");
+ return -EINVAL;
+ }
+
+ if (!devres_open_group(dev, NULL, GFP_KERNEL))
+ return -ENOMEM;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ goto out;
+
+ /* prepare and activate BMDMA host */
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
+ if (rc)
+ goto out;
+ host->private_data = host_priv;
+ host->flags |= hflags;
+
+ pci_set_master(pdev);
+ rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
+ out:
+ if (rc == 0)
+ devres_remove_group(&pdev->dev, NULL);
+ else
+ devres_release_group(&pdev->dev, NULL);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
+
#endif /* CONFIG_PCI */
+#endif /* CONFIG_ATA_BMDMA */
/**
* ata_sff_port_init - Initialize SFF/BMDMA ATA port
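
With the BMDMA paths split out of the generic SFF helpers, BMDMA-capable PCI drivers are converted from ata_pci_sff_init_one()/ata_sff_interrupt to the new ata_pci_bmdma_init_one()/ata_bmdma_interrupt pair, as the pata_* diffs below show. A condensed sketch of such a probe routine; the driver name and transfer masks are placeholders:

#include <linux/pci.h>
#include <linux/libata.h>

#define DRV_NAME "pata_example"		/* placeholder */

static struct scsi_host_template example_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations example_port_ops = {
	.inherits	= &ata_bmdma_port_ops,	/* picks up ata_bmdma_irq_clear() etc. */
};

static int example_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &example_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	/* Enables the device, maps the BARs, sets up BMDMA and requests
	 * the IRQ with ata_bmdma_interrupt() in one go. */
	return ata_pci_bmdma_init_one(pdev, ppi, &example_sht, NULL, 0);
}
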
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index 066b9f301ed5..c8d47034d5e9 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -260,7 +260,7 @@ static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
return rc;
pcim_pin_device(pdev);
}
- return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &pacpi_sht, NULL, 0);
}
static const struct pci_device_id pacpi_pci_tbl[] = {
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index f306e10c748d..794ec6e3275d 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -583,7 +583,10 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ppi[0] = &info_20_udma;
}
- return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0);
+ if (!ppi[0]->mwdma_mask && !ppi[0]->udma_mask)
+ return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0);
+ else
+ return ata_pci_bmdma_init_one(pdev, ppi, &ali_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index d95eca9c547e..620a07cabe31 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -574,7 +574,7 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* And fire it up */
- return ata_pci_sff_init_one(pdev, ppi, &amd_sht, hpriv, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &amd_sht, hpriv, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 4d066d6c30fa..ba43f0f8c880 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -421,7 +421,7 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
BUG_ON(ppi[0] == NULL);
- return ata_pci_sff_init_one(pdev, ppi, &artop_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0);
}
static const struct pci_device_id artop_pci_tbl[] = {
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 44d88b380ddd..43755616dc5a 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -246,8 +246,8 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (!pci_test_config_bits(pdev, &atiixp_enable_bits[i]))
ppi[i] = &ata_dummy_port_info;
- return ata_pci_sff_init_one(pdev, ppi, &atiixp_sht, NULL,
- ATA_HOST_PARALLEL_SCAN);
+ return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
+ ATA_HOST_PARALLEL_SCAN);
}
static const struct pci_device_id atiixp[] = {
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index bb6e0746e07d..95295935dd95 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -525,7 +525,7 @@ static int atp867x_init_one(struct pci_dev *pdev,
pci_set_master(pdev);
- rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ rc = ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &atp867x_sht);
if (rc)
dev_printk(KERN_ERR, &pdev->dev, "failed to activate host\n");
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 6422cfd13d0d..9cae65de750e 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1214,7 +1214,7 @@ static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf,
* bfin_irq_clear - Clear ATAPI interrupt.
* @ap: Port associated with this ATA transaction.
*
- * Note: Original code is ata_sff_irq_clear().
+ * Note: Original code is ata_bmdma_irq_clear().
*/
static void bfin_irq_clear(struct ata_port *ap)
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 4c81a71b8877..9f5da1c7454b 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -367,7 +367,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
#endif
- return ata_pci_sff_init_one(pdev, ppi, &cmd64x_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &cmd64x_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 17c5f346ff01..030952f1f97c 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -221,7 +221,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
continue;
rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
- ata_sff_interrupt, 0, DRV_NAME, host);
+ ata_bmdma_interrupt, 0, DRV_NAME, host);
if (rc)
return rc;
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index e809a4233a81..f792330f0d8e 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -324,7 +324,7 @@ static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ppi[1] = &info_palmax_secondary;
/* Now kick off ATA set up */
- return ata_pci_sff_init_one(pdev, ppi, &cs5530_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &cs5530_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index a02e6459fdcc..03a93186aa19 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -198,7 +198,7 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
rdmsr(ATAC_CH0D1_PIO, timings, dummy);
if (CS5535_BAD_PIO(timings))
wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0);
- return ata_pci_sff_init_one(dev, ppi, &cs5535_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &cs5535_sht, NULL, 0);
}
static const struct pci_device_id cs5535[] = {
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 914ae3506ff5..21ee23f89e88 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -260,7 +260,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
}
- return ata_pci_sff_init_one(dev, ppi, &cs5536_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &cs5536_sht, NULL, 0);
}
static const struct pci_device_id cs5536[] = {
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 0fcc096b8dac..6d915b063d93 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -138,7 +138,7 @@ static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *i
if (PCI_FUNC(pdev->devfn) != 1)
return -ENODEV;
- return ata_pci_sff_init_one(pdev, ppi, &cy82c693_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &cy82c693_sht, NULL, 0);
}
static const struct pci_device_id cy82c693[] = {
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 3bac0e079691..a08834758ea2 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -277,8 +277,8 @@ static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- return ata_pci_sff_init_one(pdev, ppi, &efar_sht, NULL,
- ATA_HOST_PARALLEL_SCAN);
+ return ata_pci_bmdma_init_one(pdev, ppi, &efar_sht, NULL,
+ ATA_HOST_PARALLEL_SCAN);
}
static const struct pci_device_id efar_pci_tbl[] = {
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 8580eb3cd54d..7688868557b9 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -361,7 +361,7 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
break;
}
/* Now kick off ATA set up */
- return ata_pci_sff_init_one(dev, ppi, &hpt36x_sht, hpriv, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &hpt36x_sht, hpriv, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 98b498b6907c..9ae4c0830577 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -987,7 +987,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
}
/* Now kick off ATA set up */
- return ata_pci_sff_init_one(dev, ppi, &hpt37x_sht, private_data, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &hpt37x_sht, private_data, 0);
}
static const struct pci_device_id hpt37x[] = {
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 8b95aeba0e74..32f3463216b8 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -548,7 +548,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c);
/* Now kick off ATA set up */
- return ata_pci_sff_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0);
}
static const struct pci_device_id hpt3x2n[] = {
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index 727a81ce4c9f..b63d5e2d4628 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -248,7 +248,7 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd");
}
pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &hpt3x3_sht);
}
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index b56e8f722d20..9f2889fe43b2 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -470,7 +470,7 @@ static int __devinit pata_icside_add_ports(struct pata_icside_info *info)
pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]);
}
- return ata_host_activate(host, ec->irq, ata_sff_interrupt, 0,
+ return ata_host_activate(host, ec->irq, ata_bmdma_interrupt, 0,
&pata_icside_sht);
}
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index f971f0de88e6..4d142a2ab8fd 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -273,7 +273,7 @@ static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *en
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- return ata_pci_sff_init_one(pdev, ppi, &it8213_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &it8213_sht, NULL, 0);
}
static const struct pci_device_id it8213_pci_tbl[] = {
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 2bd2b002d14a..bf88f71a21f4 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -933,7 +933,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
else
ppi[0] = &info_smart;
}
- return ata_pci_sff_init_one(pdev, ppi, &it821x_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &it821x_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 565e01e6ac7c..cb3babbb7035 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -144,7 +144,7 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
};
const struct ata_port_info *ppi[] = { &info, NULL };
- return ata_pci_sff_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
}
static const struct pci_device_id jmicron_pci_tbl[] = {
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 25df50f51c04..75b49d01780b 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -1110,7 +1110,7 @@ static int __devinit pata_macio_common_init(struct pata_macio_priv *priv,
/* Start it up */
priv->irq = irq;
- return ata_host_activate(priv->host, irq, ata_sff_interrupt, 0,
+ return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0,
&pata_macio_sht);
}
@@ -1140,7 +1140,7 @@ static int __devinit pata_macio_attach(struct macio_dev *mdev,
"Failed to allocate private memory\n");
return -ENOMEM;
}
- priv->node = of_node_get(mdev->ofdev.node);
+ priv->node = of_node_get(mdev->ofdev.dev.of_node);
priv->mdev = mdev;
priv->dev = &mdev->ofdev.dev;
@@ -1355,8 +1355,11 @@ static struct of_device_id pata_macio_match[] =
static struct macio_driver pata_macio_driver =
{
- .name = "pata-macio",
- .match_table = pata_macio_match,
+ .driver = {
+ .name = "pata-macio",
+ .owner = THIS_MODULE,
+ .of_match_table = pata_macio_match,
+ },
.probe = pata_macio_attach,
.remove = pata_macio_detach,
#ifdef CONFIG_PM
@@ -1366,9 +1369,6 @@ static struct macio_driver pata_macio_driver =
#ifdef CONFIG_PMAC_MEDIABAY
.mediabay_event = pata_macio_mb_event,
#endif
- .driver = {
- .owner = THIS_MODULE,
- },
};
static const struct pci_device_id pata_macio_pci_match[] = {
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index e8ca02e5a71d..dd38083dcbeb 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -153,7 +153,7 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i
return -ENODEV;
}
#endif
- return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &marvell_sht, NULL, 0);
}
static const struct pci_device_id marvell_pci_tbl[] = {
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 96b11b604ae0..f087ab55b1df 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -659,7 +659,7 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv,
ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs);
/* activate host */
- return ata_host_activate(host, priv->ata_irq, ata_sff_interrupt, 0,
+ return ata_host_activate(host, priv->ata_irq, ata_bmdma_interrupt, 0,
&mpc52xx_ata_sht);
}
@@ -694,7 +694,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
struct bcom_task *dmatsk = NULL;
/* Get ipb frequency */
- ipb_freq = mpc5xxx_get_bus_frequency(op->node);
+ ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
if (!ipb_freq) {
dev_err(&op->dev, "could not determine IPB bus frequency\n");
return -ENODEV;
@@ -702,7 +702,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
/* Get device base address from device tree, request the region
* and ioremap it. */
- rv = of_address_to_resource(op->node, 0, &res_mem);
+ rv = of_address_to_resource(op->dev.of_node, 0, &res_mem);
if (rv) {
dev_err(&op->dev, "could not determine device base address\n");
return rv;
@@ -735,14 +735,14 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
* The MPC5200 ATA controller supports MWDMA modes 0, 1 and 2 and
* UDMA modes 0, 1 and 2.
*/
- prop = of_get_property(op->node, "mwdma-mode", &proplen);
+ prop = of_get_property(op->dev.of_node, "mwdma-mode", &proplen);
if ((prop) && (proplen >= 4))
mwdma_mask = ATA_MWDMA2 & ((1 << (*prop + 1)) - 1);
- prop = of_get_property(op->node, "udma-mode", &proplen);
+ prop = of_get_property(op->dev.of_node, "udma-mode", &proplen);
if ((prop) && (proplen >= 4))
udma_mask = ATA_UDMA2 & ((1 << (*prop + 1)) - 1);
- ata_irq = irq_of_parse_and_map(op->node, 0);
+ ata_irq = irq_of_parse_and_map(op->dev.of_node, 0);
if (ata_irq == NO_IRQ) {
dev_err(&op->dev, "error mapping irq\n");
return -EINVAL;
@@ -884,9 +884,6 @@ static struct of_device_id mpc52xx_ata_of_match[] = {
static struct of_platform_driver mpc52xx_ata_of_platform_driver = {
- .owner = THIS_MODULE,
- .name = DRV_NAME,
- .match_table = mpc52xx_ata_of_match,
.probe = mpc52xx_ata_probe,
.remove = mpc52xx_ata_remove,
#ifdef CONFIG_PM
@@ -896,6 +893,7 @@ static struct of_platform_driver mpc52xx_ata_of_platform_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = mpc52xx_ata_of_match,
},
};
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index 94f979a7f4f7..3eb921c746a1 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -82,7 +82,7 @@ static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *e
ata_pci_bmdma_clear_simplex(pdev);
/* And let the library code do the work */
- return ata_pci_sff_init_one(pdev, port_info, &netcell_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, port_info, &netcell_sht, NULL, 0);
}
static const struct pci_device_id netcell_pci_tbl[] = {
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index dd53a66b19e3..cc50bd09aa26 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -149,7 +149,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
ninja32_program(base);
/* FIXME: Should we disable them at remove ? */
- return ata_host_activate(host, dev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, dev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &ninja32_sht);
}
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index fdbba2d76d3e..605f198f958c 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -380,7 +380,7 @@ static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *e
ns87415_fixup(pdev);
- return ata_pci_sff_init_one(pdev, ppi, &ns87415_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &ns87415_sht, NULL, 0);
}
static const struct pci_device_id ns87415_pci_tbl[] = {
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 3001109352ea..06ddd91ffeda 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -750,20 +750,6 @@ static void octeon_cf_dev_config(struct ata_device *dev)
}
/*
- * Trap if driver tries to do standard bmdma commands. They are not
- * supported.
- */
-static void unreachable_qc(struct ata_queued_cmd *qc)
-{
- BUG();
-}
-
-static u8 unreachable_port(struct ata_port *ap)
-{
- BUG();
-}
-
-/*
* We don't do ATAPI DMA so return 0.
*/
static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc)
@@ -804,10 +790,6 @@ static struct ata_port_operations octeon_cf_ops = {
.sff_dev_select = octeon_cf_dev_select,
.sff_irq_on = octeon_cf_irq_on,
.sff_irq_clear = octeon_cf_irq_clear,
- .bmdma_setup = unreachable_qc,
- .bmdma_start = unreachable_qc,
- .bmdma_stop = unreachable_qc,
- .bmdma_status = unreachable_port,
.cable_detect = ata_cable_40wire,
.set_piomode = octeon_cf_set_piomode,
.set_dmamode = octeon_cf_set_dmamode,
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 1f18ad9e4fe1..5a1b82c08be9 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -18,7 +18,7 @@ static int __devinit pata_of_platform_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
int ret;
- struct device_node *dn = ofdev->node;
+ struct device_node *dn = ofdev->dev.of_node;
struct resource io_res;
struct resource ctl_res;
struct resource irq_res;
@@ -91,8 +91,11 @@ static struct of_device_id pata_of_platform_match[] = {
MODULE_DEVICE_TABLE(of, pata_of_platform_match);
static struct of_platform_driver pata_of_platform_driver = {
- .name = "pata_of_platform",
- .match_table = pata_of_platform_match,
+ .driver = {
+ .name = "pata_of_platform",
+ .owner = THIS_MODULE,
+ .of_match_table = pata_of_platform_match,
+ },
.probe = pata_of_platform_probe,
.remove = __devexit_p(pata_of_platform_remove),
};
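A second pattern recurs across the macio and of_platform drivers in this series (pata_macio and pata_mpc52xx above, pata_of_platform here, sata_fsl and fore200e below): the device node is read from ofdev->dev.of_node instead of of_device::node, and the name, owner and match table move into the embedded struct device_driver. A rough sketch for a hypothetical foo driver (foo_match, foo_probe and the "acme,foo" compatible string are placeholders; the field names are the ones used in the hunks above and below):

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

static const struct of_device_id foo_match[] = {
	{ .compatible = "acme,foo", },
	{ /* sentinel */ },
};

static int foo_probe(struct of_device *ofdev, const struct of_device_id *match)
{
	/* old style read the node as ofdev->node */
	struct device_node *dn = ofdev->dev.of_node;
	const void *prop = of_get_property(dn, "acme,mode", NULL);

	return prop ? 0 : -ENODEV;
}

static struct of_platform_driver foo_driver = {
	/* old style:
	 *	.name		= "foo",
	 *	.match_table	= foo_match,
	 */
	.driver = {
		.name		= "foo",
		.owner		= THIS_MODULE,
		.of_match_table	= foo_match,
	},
	.probe	= foo_probe,
};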
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 988ef2627be3..b811c1636204 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -248,7 +248,7 @@ static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *e
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- return ata_pci_sff_init_one(pdev, ppi, &oldpiix_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &oldpiix_sht, NULL, 0);
}
static const struct pci_device_id oldpiix_pci_tbl[] = {
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 76b7d12b1e8d..0852cd07de08 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -429,7 +429,7 @@ static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if (optiplus_with_udma(dev))
ppi[0] = &info_82c700_udma;
- return ata_pci_sff_init_one(dev, ppi, &optidma_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &optidma_sht, NULL, 0);
}
static const struct pci_device_id optidma[] = {
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 09f1f22c0307..b18351122525 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -754,7 +754,7 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de
return -EIO;
pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &pdc2027x_sht);
}
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index fa1e2f3bc0fd..c39f213e1bbc 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -337,7 +337,7 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id
return -ENODEV;
}
}
- return ata_pci_sff_init_one(dev, ppi, &pdc202xx_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &pdc202xx_sht, NULL, 0);
}
static const struct pci_device_id pdc202xx[] = {
diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c
index 981615414849..cb01bf9496fe 100644
--- a/drivers/ata/pata_piccolo.c
+++ b/drivers/ata/pata_piccolo.c
@@ -95,7 +95,7 @@ static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id
};
const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
/* Just one port for the moment */
- return ata_pci_sff_init_one(dev, ppi, &tosh_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0);
}
static struct pci_device_id ata_tosh[] = {
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index a5fa388e5398..8574b31f1773 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -227,7 +227,7 @@ static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *e
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- return ata_pci_sff_init_one(pdev, ppi, &radisys_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &radisys_sht, NULL, 0);
}
static const struct pci_device_id radisys_pci_tbl[] = {
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 37092cfd7bc6..5fbe9b166c69 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -344,7 +344,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev,
*/
pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg);
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
host->private_data = hpriv;
@@ -354,7 +354,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev,
host->flags |= ATA_HOST_PARALLEL_SCAN;
pci_set_master(pdev);
- return ata_pci_sff_activate_host(host, ata_sff_interrupt, &rdc_sht);
+ return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &rdc_sht);
}
static void rdc_remove_one(struct pci_dev *pdev)
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index 6b5b63a2fd8e..e2c18257adff 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -237,7 +237,7 @@ static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
};
const struct ata_port_info *ppi[] = { &info, NULL };
- return ata_pci_sff_init_one(dev, ppi, &sc1200_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &sc1200_sht, NULL, 0);
}
static const struct pci_device_id sc1200[] = {
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 6f6193b707cb..d9db3f8d60ef 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -875,7 +875,7 @@ static void scc_postreset(struct ata_link *link, unsigned int *classes)
* scc_irq_clear - Clear PCI IDE BMDMA interrupt.
* @ap: Port associated with this ATA transaction.
*
- * Note: Original code is ata_sff_irq_clear().
+ * Note: Original code is ata_bmdma_irq_clear().
*/
static void scc_irq_clear (struct ata_port *ap)
@@ -1105,7 +1105,7 @@ static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &scc_sht);
}
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index 86b3d0133c7c..e97b32f03a6e 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -179,7 +179,7 @@ static int __devinit sch_init_one(struct pci_dev *pdev,
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- return ata_pci_sff_init_one(pdev, ppi, &sch_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &sch_sht, NULL, 0);
}
static int __init sch_init(void)
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 43ea389df2b3..86dd714e3e1d 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -460,7 +460,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
ata_pci_bmdma_clear_simplex(pdev);
- return ata_pci_sff_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 43faf106f647..d3190d7ec304 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -374,11 +374,11 @@ static int __devinit sil680_init_one(struct pci_dev *pdev,
ata_sff_std_ports(&host->ports[1]->ioaddr);
/* Register & activate */
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &sil680_sht);
use_ioports:
- return ata_pci_sff_init_one(pdev, ppi, &sil680_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index b6708032f321..60cea13cccce 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -826,7 +826,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
sis_fixup(pdev, chipset);
- return ata_pci_sff_init_one(pdev, ppi, &sis_sht, chipset, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &sis_sht, chipset, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 733b042a7469..98548f640c8e 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -316,7 +316,7 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id
val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
pci_write_config_dword(dev, 0x40, val);
- return ata_pci_sff_init_one(dev, ppi, &sl82c105_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0);
}
static const struct pci_device_id sl82c105[] = {
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 48f50600ed2a..0d1f89e571dd 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -201,7 +201,7 @@ static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if (!printed_version++)
dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
- return ata_pci_sff_init_one(dev, ppi, &triflex_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &triflex_sht, NULL, 0);
}
static const struct pci_device_id triflex[] = {
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 7e3e0a5598b7..5e659885de16 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -627,7 +627,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* We have established the device type, now fire it up */
- return ata_pci_sff_init_one(pdev, ppi, &via_sht, (void *)config, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &via_sht, (void *)config, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index a69192b38b43..61c89b54ea23 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1313,7 +1313,7 @@ static int sata_fsl_probe(struct of_device *ofdev,
dev_printk(KERN_INFO, &ofdev->dev,
"Sata FSL Platform/CSB Driver init\n");
- hcr_base = of_iomap(ofdev->node, 0);
+ hcr_base = of_iomap(ofdev->dev.of_node, 0);
if (!hcr_base)
goto error_exit_with_cleanup;
@@ -1332,7 +1332,7 @@ static int sata_fsl_probe(struct of_device *ofdev,
host_priv->ssr_base = ssr_base;
host_priv->csr_base = csr_base;
- irq = irq_of_parse_and_map(ofdev->node, 0);
+ irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
if (irq < 0) {
dev_printk(KERN_ERR, &ofdev->dev, "invalid irq from platform\n");
goto error_exit_with_cleanup;
@@ -1427,8 +1427,11 @@ static struct of_device_id fsl_sata_match[] = {
MODULE_DEVICE_TABLE(of, fsl_sata_match);
static struct of_platform_driver fsl_sata_driver = {
- .name = "fsl-sata",
- .match_table = fsl_sata_match,
+ .driver = {
+ .name = "fsl-sata",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_sata_match,
+ },
.probe = sata_fsl_probe,
.remove = sata_fsl_remove,
#ifdef CONFIG_PM
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index f3471bc949d3..a476cd99b95d 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -675,8 +675,6 @@ static struct ata_port_operations mv5_ops = {
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
.hardreset = mv_hardreset,
- .error_handler = ata_std_error_handler, /* avoid SFF EH */
- .post_internal_cmd = ATA_OP_NULL,
.scr_read = mv5_scr_read,
.scr_write = mv5_scr_write,
@@ -2813,7 +2811,7 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause)
} else if (!edma_was_enabled) {
struct ata_queued_cmd *qc = mv_get_active_qc(ap);
if (qc)
- ata_sff_host_intr(ap, qc);
+ ata_bmdma_port_intr(ap, qc);
else
mv_unexpected_intr(ap, edma_was_enabled);
}
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index baa8f0d2c86f..21161136cad0 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -920,7 +920,7 @@ static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
}
/* handle interrupt */
- return ata_sff_host_intr(ap, qc);
+ return ata_bmdma_port_intr(ap, qc);
}
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
@@ -1100,7 +1100,7 @@ static void nv_adma_irq_clear(struct ata_port *ap)
u32 notifier_clears[2];
if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
- ata_sff_irq_clear(ap);
+ ata_bmdma_irq_clear(ap);
return;
}
@@ -1505,7 +1505,7 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
- handled += ata_sff_host_intr(ap, qc);
+ handled += ata_bmdma_port_intr(ap, qc);
} else {
/*
* No request pending? Clear interrupt status
@@ -1669,7 +1669,6 @@ static void nv_mcp55_freeze(struct ata_port *ap)
mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
mask &= ~(NV_INT_ALL_MCP55 << shift);
writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
- ata_sff_freeze(ap);
}
static void nv_mcp55_thaw(struct ata_port *ap)
@@ -1683,7 +1682,6 @@ static void nv_mcp55_thaw(struct ata_port *ap)
mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
mask |= (NV_INT_MASK_MCP55 << shift);
writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
- ata_sff_thaw(ap);
}
static void nv_adma_error_handler(struct ata_port *ap)
@@ -2430,7 +2428,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ppi[0] = &nv_port_info[type];
ipriv = ppi[0]->private_data;
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index d533b3d20ca1..daeebf19a6a9 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -120,8 +120,6 @@ static void qs_host_stop(struct ata_host *host);
static void qs_qc_prep(struct ata_queued_cmd *qc);
static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
-static void qs_bmdma_stop(struct ata_queued_cmd *qc);
-static u8 qs_bmdma_status(struct ata_port *ap);
static void qs_freeze(struct ata_port *ap);
static void qs_thaw(struct ata_port *ap);
static int qs_prereset(struct ata_link *link, unsigned long deadline);
@@ -137,8 +135,6 @@ static struct ata_port_operations qs_ata_ops = {
.inherits = &ata_sff_port_ops,
.check_atapi_dma = qs_check_atapi_dma,
- .bmdma_stop = qs_bmdma_stop,
- .bmdma_status = qs_bmdma_status,
.qc_prep = qs_qc_prep,
.qc_issue = qs_qc_issue,
@@ -190,16 +186,6 @@ static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
return 1; /* ATAPI DMA not supported */
}
-static void qs_bmdma_stop(struct ata_queued_cmd *qc)
-{
- /* nothing */
-}
-
-static u8 qs_bmdma_status(struct ata_port *ap)
-{
- return 0;
-}
-
static inline void qs_enter_reg_mode(struct ata_port *ap)
{
u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
@@ -454,7 +440,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host)
if (!pp || pp->state != qs_state_mmio)
continue;
if (!(qc->tf.flags & ATA_TFLAG_POLLING))
- handled |= ata_sff_host_intr(ap, qc);
+ handled |= ata_sff_port_intr(ap, qc);
}
return handled;
}
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 2dda312b6b9a..3a4f84219719 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -503,7 +503,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
goto err_hsm;
/* ack bmdma irq events */
- ata_sff_irq_clear(ap);
+ ata_bmdma_irq_clear(ap);
/* kick HSM in the ass */
ata_sff_hsm_move(ap, qc, status, 0);
@@ -584,7 +584,7 @@ static void sil_thaw(struct ata_port *ap)
/* clear IRQ */
ap->ops->sff_check_status(ap);
- ata_sff_irq_clear(ap);
+ ata_bmdma_irq_clear(ap);
/* turn on SATA IRQ if supported */
if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index e9250514734b..be7726d7686d 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -539,12 +539,12 @@ static void sil24_config_port(struct ata_port *ap)
writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
/* zero error counters. */
- writel(0x8000, port + PORT_DECODE_ERR_THRESH);
- writel(0x8000, port + PORT_CRC_ERR_THRESH);
- writel(0x8000, port + PORT_HSHK_ERR_THRESH);
- writel(0x0000, port + PORT_DECODE_ERR_CNT);
- writel(0x0000, port + PORT_CRC_ERR_CNT);
- writel(0x0000, port + PORT_HSHK_ERR_CNT);
+ writew(0x8000, port + PORT_DECODE_ERR_THRESH);
+ writew(0x8000, port + PORT_CRC_ERR_THRESH);
+ writew(0x8000, port + PORT_HSHK_ERR_THRESH);
+ writew(0x0000, port + PORT_DECODE_ERR_CNT);
+ writew(0x0000, port + PORT_CRC_ERR_CNT);
+ writew(0x0000, port + PORT_HSHK_ERR_CNT);
/* always use 64bit activation */
writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
@@ -622,6 +622,11 @@ static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
irq_enabled = readl(port + PORT_IRQ_ENABLE_SET);
writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR);
+ /*
+ * The barrier is required to ensure that writes to cmd_block reach
+ * the memory before the write to PORT_CMD_ACTIVATE.
+ */
+ wmb();
writel((u32)paddr, port + PORT_CMD_ACTIVATE);
writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
@@ -865,7 +870,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
} else {
prb = &cb->atapi.prb;
sge = cb->atapi.sge;
- memset(cb->atapi.cdb, 0, 32);
+ memset(cb->atapi.cdb, 0, sizeof(cb->atapi.cdb));
memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
if (ata_is_data(qc->tf.protocol)) {
@@ -895,6 +900,11 @@ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
activate = port + PORT_CMD_ACTIVATE + tag * 8;
+ /*
+ * The barrier is required to ensure that writes to cmd_block reach
+ * the memory before the write to PORT_CMD_ACTIVATE.
+ */
+ wmb();
writel((u32)paddr, activate);
writel((u64)paddr >> 32, activate + 4);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index f8a91bfd66a8..2bfe3ae03976 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -279,7 +279,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
@@ -308,7 +308,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_intx(pdev, 1);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &sis_sht);
}
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 101fd6a19829..7d9db4aaf07e 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -502,7 +502,7 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en
writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &k2_sata_sht);
}
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index d8dac17dc2c8..b8578c32d344 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -242,7 +242,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_intx(pdev, 1);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &uli_sht);
}
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 08f65492cc81..4730c42a5ee5 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -308,7 +308,7 @@ static void svia_noop_freeze(struct ata_port *ap)
* certain way. Leave it alone and just clear pending IRQ.
*/
ap->ops->sff_check_status(ap);
- ata_sff_irq_clear(ap);
+ ata_bmdma_irq_clear(ap);
}
/**
@@ -463,7 +463,7 @@ static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
struct ata_host *host;
int rc;
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
*r_host = host;
@@ -520,7 +520,7 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
struct ata_host *host;
int i, rc;
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
*r_host = host;
@@ -575,6 +575,33 @@ static void svia_configure(struct pci_dev *pdev)
tmp8 |= NATIVE_MODE_ALL;
pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
}
+
+ /*
+ * vt6421 has problems talking to some drives. The following
+ * is the fix from Joseph Chan <JosephChan@via.com.tw>.
+ *
+ * When host issues HOLD, device may send up to 20DW of data
+ * before acknowledging it with HOLDA and the host should be
+ * able to buffer them in FIFO. Unfortunately, some WD drives
+ * send up to 40DW before acknowledging HOLD and, in the
+ * default configuration, this ends up overflowing vt6421's
+ * FIFO, making the controller abort the transaction with
+ * R_ERR.
+ *
+ * Rx52[2] is the internal 128DW FIFO Flow control watermark
+ * adjusting mechanism enable bit and the default value 0
+ * means host will issue HOLD to device when the left FIFO
+ * size goes below 32DW. Setting it to 1 makes the watermark
+ * 64DW.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=15173
+ * http://article.gmane.org/gmane.linux.ide/46352
+ */
+ if (pdev->device == 0x3249) {
+ pci_read_config_byte(pdev, 0x52, &tmp8);
+ tmp8 |= 1 << 2;
+ pci_write_config_byte(pdev, 0x52, tmp8);
+ }
}
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -628,7 +655,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
svia_configure(pdev);
pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &svia_sht);
}
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 2107952ebff1..b777176ff494 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -245,7 +245,7 @@ static void vsc_port_intr(u8 port_status, struct ata_port *ap)
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
- handled = ata_sff_host_intr(ap, qc);
+ handled = ata_bmdma_port_intr(ap, qc);
/* We received an interrupt during a polled command,
* or some other spurious condition. Interrupt reporting
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f7d6ebaa0418..da8f176c051e 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -789,7 +789,7 @@ static int __init fore200e_sba_map(struct fore200e *fore200e)
fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
/* get the supported DVMA burst sizes */
- bursts = of_getintprop_default(op->node->parent, "burst-sizes", 0x00);
+ bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);
if (sbus_can_dma_64bit())
sbus_set_sbus64(&op->dev, bursts);
@@ -820,18 +820,20 @@ static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_
const u8 *prop;
int len;
- prop = of_get_property(op->node, "madaddrlo2", &len);
+ prop = of_get_property(op->dev.of_node, "madaddrlo2", &len);
if (!prop)
return -ENODEV;
memcpy(&prom->mac_addr[4], prop, 4);
- prop = of_get_property(op->node, "madaddrhi4", &len);
+ prop = of_get_property(op->dev.of_node, "madaddrhi4", &len);
if (!prop)
return -ENODEV;
memcpy(&prom->mac_addr[2], prop, 4);
- prom->serial_number = of_getintprop_default(op->node, "serialnumber", 0);
- prom->hw_revision = of_getintprop_default(op->node, "promversion", 0);
+ prom->serial_number = of_getintprop_default(op->dev.of_node,
+ "serialnumber", 0);
+ prom->hw_revision = of_getintprop_default(op->dev.of_node,
+ "promversion", 0);
return 0;
}
@@ -841,10 +843,10 @@ static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
struct of_device *op = fore200e->bus_dev;
const struct linux_prom_registers *regs;
- regs = of_get_property(op->node, "reg", NULL);
+ regs = of_get_property(op->dev.of_node, "reg", NULL);
return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n",
- (regs ? regs->which_io : 0), op->node->name);
+ (regs ? regs->which_io : 0), op->dev.of_node->name);
}
#endif /* CONFIG_SBUS */
@@ -2693,8 +2695,11 @@ static const struct of_device_id fore200e_sba_match[] = {
MODULE_DEVICE_TABLE(of, fore200e_sba_match);
static struct of_platform_driver fore200e_sba_driver = {
- .name = "fore_200e",
- .match_table = fore200e_sba_match,
+ .driver = {
+ .name = "fore_200e",
+ .owner = THIS_MODULE,
+ .of_match_table = fore200e_sba_match,
+ },
.probe = fore200e_sba_probe,
.remove = __devexit_p(fore200e_sba_remove),
};
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 3fecfb446d90..5ad3bad2b0a5 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -37,7 +37,7 @@
#define CFAG12864BFB_NAME "cfag12864bfb"
-static struct fb_fix_screeninfo cfag12864bfb_fix __initdata = {
+static struct fb_fix_screeninfo cfag12864bfb_fix __devinitdata = {
.id = "cfag12864b",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO10,
@@ -48,7 +48,7 @@ static struct fb_fix_screeninfo cfag12864bfb_fix __initdata = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo cfag12864bfb_var __initdata = {
+static struct fb_var_screeninfo cfag12864bfb_var __devinitdata = {
.xres = CFAG12864B_WIDTH,
.yres = CFAG12864B_HEIGHT,
.xres_virtual = CFAG12864B_WIDTH,
@@ -114,7 +114,7 @@ none:
return ret;
}
-static int cfag12864bfb_remove(struct platform_device *device)
+static int __devexit cfag12864bfb_remove(struct platform_device *device)
{
struct fb_info *info = platform_get_drvdata(device);
@@ -128,7 +128,7 @@ static int cfag12864bfb_remove(struct platform_device *device)
static struct platform_driver cfag12864bfb_driver = {
.probe = cfag12864bfb_probe,
- .remove = cfag12864bfb_remove,
+ .remove = __devexit_p(cfag12864bfb_remove),
.driver = {
.name = CFAG12864BFB_NAME,
},
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 057979a19eea..2bdd8a94ec94 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -9,6 +9,7 @@
#include <linux/memory.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
+#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
@@ -246,6 +247,8 @@ int register_node(struct node *node, int num, struct node *parent)
scan_unevictable_register_node(node);
hugetlb_register_node(node);
+
+ compaction_register_node(node);
}
return error;
}
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index bf6b13206d00..9fc630ce1ddb 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -162,7 +162,7 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
topology_remove_dev(cpu);
break;
}
- return rc ? NOTIFY_BAD : NOTIFY_OK;
+ return notifier_from_errno(rc);
}
static int __cpuinit topology_sysfs_init(void)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 77bfce52e9ca..de277689da61 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -76,6 +76,17 @@ config BLK_DEV_XD
It's pretty unlikely that you have one of these: say N.
+config GDROM
+ tristate "SEGA Dreamcast GD-ROM drive"
+ depends on SH_DREAMCAST
+ help
+ A standard SEGA Dreamcast comes with a modified CD ROM drive called a
+ "GD-ROM" by SEGA to signify it is capable of reading special disks
+ with up to 1 GB of data. This drive will also read standard CD ROM
+ disks. Select this option to access any disks in your GD ROM drive.
+ Most users will want to say "Y" here.
+ You can also build this as a module which will be called gdrom.
+
config PARIDE
tristate "Parallel port IDE device support"
depends on PARPORT_PC
@@ -103,17 +114,6 @@ config PARIDE
"MicroSolutions backpack protocol", "DataStor Commuter protocol"
etc.).
-config GDROM
- tristate "SEGA Dreamcast GD-ROM drive"
- depends on SH_DREAMCAST
- help
- A standard SEGA Dreamcast comes with a modified CD ROM drive called a
- "GD-ROM" by SEGA to signify it is capable of reading special disks
- with up to 1 GB of data. This drive will also read standard CD ROM
- disks. Select this option to access any disks in your GD ROM drive.
- Most users will want to say "Y" here.
- You can also build this as a module which will be called gdrom.
-
source "drivers/block/paride/Kconfig"
config BLK_CPQ_DA
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 6081e81d5738..f1bf79d9bc0a 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -133,6 +133,28 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
return page;
}
+static void brd_free_page(struct brd_device *brd, sector_t sector)
+{
+ struct page *page;
+ pgoff_t idx;
+
+ spin_lock(&brd->brd_lock);
+ idx = sector >> PAGE_SECTORS_SHIFT;
+ page = radix_tree_delete(&brd->brd_pages, idx);
+ spin_unlock(&brd->brd_lock);
+ if (page)
+ __free_page(page);
+}
+
+static void brd_zero_page(struct brd_device *brd, sector_t sector)
+{
+ struct page *page;
+
+ page = brd_lookup_page(brd, sector);
+ if (page)
+ clear_highpage(page);
+}
+
/*
* Free all backing store pages and radix tree. This must only be called when
* there are no other users of the device.
@@ -189,6 +211,24 @@ static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
return 0;
}
+static void discard_from_brd(struct brd_device *brd,
+ sector_t sector, size_t n)
+{
+ while (n >= PAGE_SIZE) {
+ /*
+ * Don't want to actually discard pages here because
+ * re-allocating the pages can result in writeback
+ * deadlocks under heavy load.
+ */
+ if (0)
+ brd_free_page(brd, sector);
+ else
+ brd_zero_page(brd, sector);
+ sector += PAGE_SIZE >> SECTOR_SHIFT;
+ n -= PAGE_SIZE;
+ }
+}
+
/*
* Copy n bytes from src to the brd starting at sector. Does not sleep.
*/
@@ -300,6 +340,12 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
get_capacity(bdev->bd_disk))
goto out;
+ if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
+ err = 0;
+ discard_from_brd(brd, sector, bio->bi_size);
+ goto out;
+ }
+
rw = bio_rw(bio);
if (rw == READA)
rw = READ;
@@ -320,7 +366,7 @@ out:
}
#ifdef CONFIG_BLK_DEV_XIP
-static int brd_direct_access (struct block_device *bdev, sector_t sector,
+static int brd_direct_access(struct block_device *bdev, sector_t sector,
void **kaddr, unsigned long *pfn)
{
struct brd_device *brd = bdev->bd_disk->private_data;
@@ -437,6 +483,11 @@ static struct brd_device *brd_alloc(int i)
blk_queue_max_hw_sectors(brd->brd_queue, 1024);
blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
+ brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
+ brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
+ brd->brd_queue->limits.discard_zeroes_data = 1;
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
+
disk = brd->brd_disk = alloc_disk(1 << part_shift);
if (!disk)
goto out_free_queue;
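With the queue limits and QUEUE_FLAG_DISCARD set above, discard requests reach brd_make_request() with BIO_RW_DISCARD set and are handled by discard_from_brd(). As a hedged userspace illustration only (it assumes brd is built and the device node is /dev/ram0; the standard BLKDISCARD ioctl is what ultimately generates the discard bios):

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

/* Punch a 1 MiB hole at the start of /dev/ram0; with this patch the
 * affected brd pages are zeroed rather than freed. */
int punch_hole(void)
{
	uint64_t range[2] = { 0, 1 << 20 };	/* byte offset, byte length */
	int fd = open("/dev/ram0", O_WRONLY);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, BLKDISCARD, range);
	close(fd);
	return ret;
}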
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index e1d0e2cfec72..3381505c8a6c 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -188,11 +188,11 @@ scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd)
sa = h->scsi_ctlr;
stk = &sa->cmd_stack;
+ stk->top++;
if (stk->top >= CMD_STACK_SIZE) {
printk("cciss: scsi_cmd_free called too many times.\n");
BUG();
}
- stk->top++;
stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) cmd;
}
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 3390716898d5..e3f88d6e1412 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -84,6 +84,9 @@ struct drbd_bitmap {
#define BM_MD_IO_ERROR 1
#define BM_P_VMALLOCED 2
+static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+ unsigned long e, int val, const enum km_type km);
+
static int bm_is_locked(struct drbd_bitmap *b)
{
return test_bit(BM_LOCKED, &b->bm_flags);
@@ -441,7 +444,7 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
* In case this is actually a resize, we copy the old bitmap into the new one.
* Otherwise, the bitmap is initialized to all bits set.
*/
-int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity)
+int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
{
struct drbd_bitmap *b = mdev->bitmap;
unsigned long bits, words, owords, obits, *p_addr, *bm;
@@ -516,7 +519,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity)
obits = b->bm_bits;
growing = bits > obits;
- if (opages)
+ if (opages && growing && set_new_bits)
bm_set_surplus(b);
b->bm_pages = npages;
@@ -526,8 +529,12 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity)
b->bm_dev_capacity = capacity;
if (growing) {
- bm_memset(b, owords, 0xff, words-owords);
- b->bm_set += bits - obits;
+ if (set_new_bits) {
+ bm_memset(b, owords, 0xff, words-owords);
+ b->bm_set += bits - obits;
+ } else
+ bm_memset(b, owords, 0x00, words-owords);
+
}
if (want < have) {
@@ -773,7 +780,7 @@ static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int
/* nothing to do, on disk == in memory */
# define bm_cpu_to_lel(x) ((void)0)
# else
-void bm_cpu_to_lel(struct drbd_bitmap *b)
+static void bm_cpu_to_lel(struct drbd_bitmap *b)
{
/* need to cpu_to_lel all the pages ...
* this may be optimized by using
@@ -1015,7 +1022,7 @@ unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_f
* wants bitnr, not sector.
* expected to be called for only a few bits (e - s about BITS_PER_LONG).
* Must hold bitmap lock already. */
-int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
unsigned long e, int val, const enum km_type km)
{
struct drbd_bitmap *b = mdev->bitmap;
@@ -1053,7 +1060,7 @@ int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
* for val != 0, we change 0 -> 1, return code positive
* for val == 0, we change 1 -> 0, return code negative
* wants bitnr, not sector */
-int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
const unsigned long e, int val)
{
unsigned long flags;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index e5e86a781820..485ed8c7d623 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -132,6 +132,7 @@ enum {
DRBD_FAULT_DT_RA = 6, /* data read ahead */
DRBD_FAULT_BM_ALLOC = 7, /* bitmap allocation */
DRBD_FAULT_AL_EE = 8, /* alloc ee */
+ DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */
DRBD_FAULT_MAX,
};
@@ -208,8 +209,11 @@ enum drbd_packets {
P_RS_IS_IN_SYNC = 0x22, /* meta socket */
P_SYNC_PARAM89 = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
P_COMPRESSED_BITMAP = 0x24, /* compressed or otherwise encoded bitmap transfer */
+ /* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */
+ /* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */
+ P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */
- P_MAX_CMD = 0x25,
+ P_MAX_CMD = 0x28,
P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
P_MAX_OPT_CMD = 0x101,
@@ -264,6 +268,7 @@ static inline const char *cmdname(enum drbd_packets cmd)
[P_CSUM_RS_REQUEST] = "CsumRSRequest",
[P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
[P_COMPRESSED_BITMAP] = "CBitmap",
+ [P_DELAY_PROBE] = "DelayProbe",
[P_MAX_CMD] = NULL,
};
@@ -481,7 +486,8 @@ struct p_sizes {
u64 u_size; /* user requested size */
u64 c_size; /* current exported size */
u32 max_segment_size; /* Maximal size of a BIO */
- u32 queue_order_type;
+ u16 queue_order_type; /* not yet implemented in DRBD */
+ u16 dds_flags; /* use enum dds_flags here. */
} __packed;
struct p_state {
@@ -538,6 +544,18 @@ struct p_compressed_bm {
u8 code[0];
} __packed;
+struct p_delay_probe {
+ struct p_header head;
+ u32 seq_num; /* sequence number to match the two probe packets */
+ u32 offset; /* usecs the probe got sent after the reference time point */
+} __packed;
+
+struct delay_probe {
+ struct list_head list;
+ unsigned int seq_num;
+ struct timeval time;
+};
+
/* DCBP: Drbd Compressed Bitmap Packet ... */
static inline enum drbd_bitmap_code
DCBP_get_code(struct p_compressed_bm *p)
@@ -722,22 +740,6 @@ enum epoch_event {
EV_CLEANUP = 32, /* used as flag */
};
-struct drbd_epoch_entry {
- struct drbd_work w;
- struct drbd_conf *mdev;
- struct bio *private_bio;
- struct hlist_node colision;
- sector_t sector;
- unsigned int size;
- struct drbd_epoch *epoch;
-
- /* up to here, the struct layout is identical to drbd_request;
- * we might be able to use that to our advantage... */
-
- unsigned int flags;
- u64 block_id;
-};
-
struct drbd_wq_barrier {
struct drbd_work w;
struct completion done;
@@ -748,17 +750,49 @@ struct digest_info {
void *digest;
};
-/* ee flag bits */
+struct drbd_epoch_entry {
+ struct drbd_work w;
+ struct hlist_node colision;
+ struct drbd_epoch *epoch;
+ struct drbd_conf *mdev;
+ struct page *pages;
+ atomic_t pending_bios;
+ unsigned int size;
+ /* see comments on ee flag bits below */
+ unsigned long flags;
+ sector_t sector;
+ u64 block_id;
+};
+
+/* ee flag bits.
+ * While corresponding bios are in flight, the only modification will be
+ * set_bit WAS_ERROR, which has to be atomic.
+ * If no bios are in flight yet, or all have been completed,
+ * non-atomic modification to ee->flags is ok.
+ */
enum {
__EE_CALL_AL_COMPLETE_IO,
- __EE_CONFLICT_PENDING,
__EE_MAY_SET_IN_SYNC,
+
+ /* This epoch entry closes an epoch using a barrier.
+ * On successful completion, the epoch is released,
+ * and the P_BARRIER_ACK sent. */
__EE_IS_BARRIER,
+
+ /* In case a barrier failed,
+ * we need to resubmit without the barrier flag. */
+ __EE_RESUBMITTED,
+
+ /* we may have several bios per epoch entry.
+ * if any of those fail, we set this flag atomically
+ * from the endio callback */
+ __EE_WAS_ERROR,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
-#define EE_CONFLICT_PENDING (1<<__EE_CONFLICT_PENDING)
#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
#define EE_IS_BARRIER (1<<__EE_IS_BARRIER)
+#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
+#define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
/* global flag bits */
enum {
@@ -908,9 +942,11 @@ struct drbd_conf {
unsigned int ko_count;
struct drbd_work resync_work,
unplug_work,
- md_sync_work;
+ md_sync_work,
+ delay_probe_work;
struct timer_list resync_timer;
struct timer_list md_sync_timer;
+ struct timer_list delay_probe_timer;
/* Used after attach while negotiating new disk state. */
union drbd_state new_state_tmp;
@@ -1026,6 +1062,12 @@ struct drbd_conf {
u64 ed_uuid; /* UUID of the exposed data */
struct mutex state_mutex;
char congestion_reason; /* Why we were congested... */
+ struct list_head delay_probes; /* protected by peer_seq_lock */
+ int data_delay; /* Delay of packets on the data-sock behind meta-sock */
+ unsigned int delay_seq; /* To generate sequence numbers of delay probes */
+ struct timeval dps_time; /* delay-probes-start-time */
+ unsigned int dp_volume_last; /* send_cnt of last delay probe */
+ int c_sync_rate; /* current resync rate after delay_probe magic */
};
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1081,6 +1123,11 @@ enum chg_state_flags {
CS_ORDERED = CS_WAIT_COMPLETE + CS_SERIALIZE,
};
+enum dds_flags {
+ DDSF_FORCED = 1,
+ DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
+};
+
extern void drbd_init_set_defaults(struct drbd_conf *mdev);
extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
union drbd_state mask, union drbd_state val);
@@ -1113,7 +1160,7 @@ extern int drbd_send_protocol(struct drbd_conf *mdev);
extern int drbd_send_uuids(struct drbd_conf *mdev);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val);
-extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply);
+extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
extern int _drbd_send_state(struct drbd_conf *mdev);
extern int drbd_send_state(struct drbd_conf *mdev);
extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
@@ -1311,7 +1358,7 @@ struct bm_extent {
#define APP_R_HSIZE 15
extern int drbd_bm_init(struct drbd_conf *mdev);
-extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors);
+extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_conf *mdev);
extern void drbd_bm_set_all(struct drbd_conf *mdev);
extern void drbd_bm_clear_all(struct drbd_conf *mdev);
@@ -1383,7 +1430,7 @@ extern void drbd_resume_io(struct drbd_conf *mdev);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
-extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, int force) __must_hold(local);
+extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
extern void resync_after_online_grow(struct drbd_conf *);
extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
@@ -1414,7 +1461,8 @@ static inline void ov_oos_print(struct drbd_conf *mdev)
}
-extern void drbd_csum(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
+extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
+extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *, struct drbd_epoch_entry *, void *);
/* worker callbacks */
extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int);
extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int);
@@ -1426,7 +1474,6 @@ extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int);
extern int w_resync_inactive(struct drbd_conf *, struct drbd_work *, int);
extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int);
-extern int w_io_error(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int);
extern int w_make_resync_request(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int);
@@ -1438,6 +1485,8 @@ extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int);
extern void resync_timer_fn(unsigned long data);
/* drbd_receiver.c */
+extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
+ const unsigned rw, const int fault_type);
extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
u64 id,
@@ -1490,7 +1539,7 @@ static inline void drbd_tcp_nodelay(struct socket *sock)
static inline void drbd_tcp_quickack(struct socket *sock)
{
- int __user val = 1;
+ int __user val = 2;
(void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
(char __user *)&val, sizeof(val));
}
@@ -1593,6 +1642,41 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
* inline helper functions
*************************/
+/* see also page_chain_add and friends in drbd_receiver.c */
+static inline struct page *page_chain_next(struct page *page)
+{
+ return (struct page *)page_private(page);
+}
+#define page_chain_for_each(page) \
+ for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
+ page = page_chain_next(page))
+#define page_chain_for_each_safe(page, n) \
+ for (; page && ({ n = page_chain_next(page); 1; }); page = n)
+
+static inline int drbd_bio_has_active_page(struct bio *bio)
+{
+ struct bio_vec *bvec;
+ int i;
+
+ __bio_for_each_segment(bvec, bio, i, 0) {
+ if (page_count(bvec->bv_page) > 1)
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
+{
+ struct page *page = e->pages;
+ page_chain_for_each(page) {
+ if (page_count(page) > 1)
+ return 1;
+ }
+ return 0;
+}
+
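These helpers treat page->private as the "next" pointer of a singly linked list, so a chain of pages needs no allocation beyond the pages themselves. A minimal sketch of building and tearing down such a chain, using only the stock page_private()/set_page_private() accessors (alloc_page_chain/free_page_chain are hypothetical names, not part of this patch):

	/* sketch: push n freshly allocated pages onto a chain, newest first */
	static struct page *alloc_page_chain(int n)
	{
		struct page *head = NULL;

		while (n--) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p)
				break;			/* a partial chain is still a valid chain */
			set_page_private(p, (unsigned long)head);
			head = p;
		}
		return head;
	}

	/* sketch: walk the chain with the _safe variant and free every page */
	static void free_page_chain(struct page *page)
	{
		struct page *next;

		page_chain_for_each_safe(page, next)
			__free_page(page);
	}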
+
static inline void drbd_state_lock(struct drbd_conf *mdev)
{
wait_event(mdev->misc_wait,
@@ -1641,7 +1725,7 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
switch (mdev->ldev->dc.on_io_error) {
case EP_PASS_ON:
if (!forcedetach) {
- if (printk_ratelimit())
+ if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Local IO failed in %s."
"Passing error on...\n", where);
break;
@@ -2138,7 +2222,7 @@ static inline int __inc_ap_bio_cond(struct drbd_conf *mdev)
/* I'd like to use wait_event_lock_irq,
* but I'm not sure when it got introduced,
* and not sure when it has 3 or 4 arguments */
-static inline void inc_ap_bio(struct drbd_conf *mdev, int one_or_two)
+static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
{
/* compare with after_state_ch,
* os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */
@@ -2160,7 +2244,7 @@ static inline void inc_ap_bio(struct drbd_conf *mdev, int one_or_two)
finish_wait(&mdev->misc_wait, &wait);
spin_lock_irq(&mdev->req_lock);
}
- atomic_add(one_or_two, &mdev->ap_bio_cnt);
+ atomic_add(count, &mdev->ap_bio_cnt);
spin_unlock_irq(&mdev->req_lock);
}
@@ -2251,7 +2335,8 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
if (test_bit(MD_NO_BARRIER, &mdev->flags))
return;
- r = blkdev_issue_flush(mdev->ldev->md_bdev, NULL);
+ r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL,
+ BLKDEV_IFL_WAIT);
if (r) {
set_bit(MD_NO_BARRIER, &mdev->flags);
dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 93d1f9b469d4..6b077f93acc6 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -684,6 +684,9 @@ static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
rv = SS_NO_REMOTE_DISK;
+ else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
+ rv = SS_NO_UP_TO_DATE_DISK;
+
else if ((ns.conn == C_CONNECTED ||
ns.conn == C_WF_BITMAP_S ||
ns.conn == C_SYNC_SOURCE ||
@@ -840,7 +843,12 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
break;
case C_WF_BITMAP_S:
case C_PAUSED_SYNC_S:
- ns.pdsk = D_OUTDATED;
+ /* remap any consistent state to D_OUTDATED,
+ * but disallow "upgrade" of not even consistent states.
+ */
+ ns.pdsk =
+ (D_DISKLESS < os.pdsk && os.pdsk < D_OUTDATED)
+ ? os.pdsk : D_OUTDATED;
break;
case C_SYNC_SOURCE:
ns.pdsk = D_INCONSISTENT;
@@ -1205,8 +1213,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
&& (ns.pdsk < D_INCONSISTENT ||
ns.pdsk == D_UNKNOWN ||
ns.pdsk == D_OUTDATED)) {
- kfree(mdev->p_uuid);
- mdev->p_uuid = NULL;
if (get_ldev(mdev)) {
if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
@@ -1232,7 +1238,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
kfree(mdev->p_uuid); /* We expect to receive up-to-date UUIDs soon. */
mdev->p_uuid = NULL; /* ...to not use the old ones in the mean time */
- drbd_send_sizes(mdev, 0); /* to start sync... */
+ drbd_send_sizes(mdev, 0, 0); /* to start sync... */
drbd_send_uuids(mdev);
drbd_send_state(mdev);
}
@@ -1755,7 +1761,7 @@ int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
(struct p_header *)&p, sizeof(p));
}
-int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply)
+int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
struct p_sizes p;
sector_t d_size, u_size;
@@ -1767,7 +1773,6 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply)
d_size = drbd_get_max_capacity(mdev->ldev);
u_size = mdev->ldev->dc.disk_size;
q_order_type = drbd_queue_order_type(mdev);
- p.queue_order_type = cpu_to_be32(drbd_queue_order_type(mdev));
put_ldev(mdev);
} else {
d_size = 0;
@@ -1779,7 +1784,8 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply)
p.u_size = cpu_to_be64(u_size);
p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue));
- p.queue_order_type = cpu_to_be32(q_order_type);
+ p.queue_order_type = cpu_to_be16(q_order_type);
+ p.dds_flags = cpu_to_be16(flags);
ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
(struct p_header *)&p, sizeof(p));
@@ -2180,6 +2186,43 @@ int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
return ok;
}
+static int drbd_send_delay_probe(struct drbd_conf *mdev, struct drbd_socket *ds)
+{
+ struct p_delay_probe dp;
+ int offset, ok = 0;
+ struct timeval now;
+
+ mutex_lock(&ds->mutex);
+ if (likely(ds->socket)) {
+ do_gettimeofday(&now);
+ offset = now.tv_usec - mdev->dps_time.tv_usec +
+ (now.tv_sec - mdev->dps_time.tv_sec) * 1000000;
+ dp.seq_num = cpu_to_be32(mdev->delay_seq);
+ dp.offset = cpu_to_be32(offset);
+
+ ok = _drbd_send_cmd(mdev, ds->socket, P_DELAY_PROBE,
+ (struct p_header *)&dp, sizeof(dp), 0);
+ }
+ mutex_unlock(&ds->mutex);
+
+ return ok;
+}
+
+static int drbd_send_delay_probes(struct drbd_conf *mdev)
+{
+ int ok;
+
+ mdev->delay_seq++;
+ do_gettimeofday(&mdev->dps_time);
+ ok = drbd_send_delay_probe(mdev, &mdev->meta);
+ ok = ok && drbd_send_delay_probe(mdev, &mdev->data);
+
+ mdev->dp_volume_last = mdev->send_cnt;
+ mod_timer(&mdev->delay_probe_timer, jiffies + mdev->sync_conf.dp_interval * HZ / 10);
+
+ return ok;
+}
+
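A probe records its start time in dps_time and carries the elapsed-time offset in microseconds; the receiver subtracts that offset again to reconstruct the send time. The arithmetic used above boils down to this (sketch, hypothetical helper name, timeval fields assumed normalized):

	/* sketch: microseconds elapsed from 'start' to 'now' */
	static inline long tv_elapsed_us(const struct timeval *start,
					 const struct timeval *now)
	{
		return (now->tv_sec - start->tv_sec) * 1000000L +
		       (now->tv_usec - start->tv_usec);
	}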
/* called on sndtimeo
* returns FALSE if we should retry,
* TRUE if we think connection is dead
@@ -2229,9 +2272,9 @@ static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *
* with page_count == 0 or PageSlab.
*/
static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
- int offset, size_t size)
+ int offset, size_t size, unsigned msg_flags)
{
- int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, 0);
+ int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
kunmap(page);
if (sent == size)
mdev->send_cnt += size>>9;
@@ -2239,7 +2282,7 @@ static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
}
static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
- int offset, size_t size)
+ int offset, size_t size, unsigned msg_flags)
{
mm_segment_t oldfs = get_fs();
int sent, ok;
@@ -2252,14 +2295,15 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
* __page_cache_release a page that would actually still be referenced
* by someone, leading to some obscure delayed Oops somewhere else. */
if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
- return _drbd_no_send_page(mdev, page, offset, size);
+ return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
+ msg_flags |= MSG_NOSIGNAL;
drbd_update_congested(mdev);
set_fs(KERNEL_DS);
do {
sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
offset, len,
- MSG_NOSIGNAL);
+ msg_flags);
if (sent == -EAGAIN) {
if (we_should_drop_the_connection(mdev,
mdev->data.socket))
@@ -2288,9 +2332,11 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
{
struct bio_vec *bvec;
int i;
+ /* hint all but last page with MSG_MORE */
__bio_for_each_segment(bvec, bio, i, 0) {
if (!_drbd_no_send_page(mdev, bvec->bv_page,
- bvec->bv_offset, bvec->bv_len))
+ bvec->bv_offset, bvec->bv_len,
+ i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
return 0;
}
return 1;
@@ -2300,15 +2346,56 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
{
struct bio_vec *bvec;
int i;
+ /* hint all but last page with MSG_MORE */
__bio_for_each_segment(bvec, bio, i, 0) {
if (!_drbd_send_page(mdev, bvec->bv_page,
- bvec->bv_offset, bvec->bv_len))
+ bvec->bv_offset, bvec->bv_len,
+ i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
+ return 0;
+ }
+ return 1;
+}
+
+static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
+{
+ struct page *page = e->pages;
+ unsigned len = e->size;
+ /* hint all but last page with MSG_MORE */
+ page_chain_for_each(page) {
+ unsigned l = min_t(unsigned, len, PAGE_SIZE);
+ if (!_drbd_send_page(mdev, page, 0, l,
+ page_chain_next(page) ? MSG_MORE : 0))
return 0;
+ len -= l;
}
+ return 1;
+}
+
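The new msg_flags parameter exists so that every fragment of a request except the last can be sent with MSG_MORE, letting TCP coalesce the pieces instead of pushing a segment per page. Stripped of the bio/page-chain details, the pattern is (sketch; send_fragments is a hypothetical name):

	/* sketch: hint all but the last fragment with MSG_MORE */
	static int send_fragments(struct drbd_conf *mdev, struct kvec *frag, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			unsigned msg_flags = (i == n - 1) ? 0 : MSG_MORE;
			int sent = drbd_send(mdev, mdev->data.socket,
					     frag[i].iov_base, frag[i].iov_len,
					     msg_flags);
			if (sent != frag[i].iov_len)
				return 0;	/* short send; caller handles it */
		}
		return 1;
	}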
+static void consider_delay_probes(struct drbd_conf *mdev)
+{
+ if (mdev->state.conn != C_SYNC_SOURCE || mdev->agreed_pro_version < 93)
+ return;
+
+ if (mdev->dp_volume_last + mdev->sync_conf.dp_volume * 2 < mdev->send_cnt)
+ drbd_send_delay_probes(mdev);
+}
+
+static int w_delay_probes(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+{
+ if (!cancel && mdev->state.conn == C_SYNC_SOURCE)
+ drbd_send_delay_probes(mdev);
return 1;
}
+static void delay_probe_timer_fn(unsigned long data)
+{
+ struct drbd_conf *mdev = (struct drbd_conf *) data;
+
+ if (list_empty(&mdev->delay_probe_work.list))
+ drbd_queue_work(&mdev->data.work, &mdev->delay_probe_work);
+}
+
/* Used to send write requests
* R_PRIMARY -> Peer (P_DATA)
*/
@@ -2357,11 +2444,11 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
p.dp_flags = cpu_to_be32(dp_flags);
set_bit(UNPLUG_REMOTE, &mdev->flags);
ok = (sizeof(p) ==
- drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE));
+ drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
if (ok && dgs) {
dgb = mdev->int_dig_out;
- drbd_csum(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
- ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE);
+ drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
+ ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
}
if (ok) {
if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
@@ -2371,6 +2458,10 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
}
drbd_put_data_sock(mdev);
+
+ if (ok)
+ consider_delay_probes(mdev);
+
return ok;
}
@@ -2406,16 +2497,20 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
return 0;
ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p,
- sizeof(p), MSG_MORE);
+ sizeof(p), dgs ? MSG_MORE : 0);
if (ok && dgs) {
dgb = mdev->int_dig_out;
- drbd_csum(mdev, mdev->integrity_w_tfm, e->private_bio, dgb);
- ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE);
+ drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
+ ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
}
if (ok)
- ok = _drbd_send_zc_bio(mdev, e->private_bio);
+ ok = _drbd_send_zc_ee(mdev, e);
drbd_put_data_sock(mdev);
+
+ if (ok)
+ consider_delay_probes(mdev);
+
return ok;
}
@@ -2628,16 +2723,24 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
INIT_LIST_HEAD(&mdev->unplug_work.list);
INIT_LIST_HEAD(&mdev->md_sync_work.list);
INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
+ INIT_LIST_HEAD(&mdev->delay_probes);
+ INIT_LIST_HEAD(&mdev->delay_probe_work.list);
+
mdev->resync_work.cb = w_resync_inactive;
mdev->unplug_work.cb = w_send_write_hint;
mdev->md_sync_work.cb = w_md_sync;
mdev->bm_io_work.w.cb = w_bitmap_io;
+ mdev->delay_probe_work.cb = w_delay_probes;
init_timer(&mdev->resync_timer);
init_timer(&mdev->md_sync_timer);
+ init_timer(&mdev->delay_probe_timer);
mdev->resync_timer.function = resync_timer_fn;
mdev->resync_timer.data = (unsigned long) mdev;
mdev->md_sync_timer.function = md_sync_timer_fn;
mdev->md_sync_timer.data = (unsigned long) mdev;
+ mdev->delay_probe_timer.function = delay_probe_timer_fn;
+ mdev->delay_probe_timer.data = (unsigned long) mdev;
+
init_waitqueue_head(&mdev->misc_wait);
init_waitqueue_head(&mdev->state_wait);
@@ -2680,7 +2783,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
drbd_set_my_capacity(mdev, 0);
if (mdev->bitmap) {
/* maybe never allocated. */
- drbd_bm_resize(mdev, 0);
+ drbd_bm_resize(mdev, 0, 1);
drbd_bm_cleanup(mdev);
}
@@ -3129,7 +3232,7 @@ int __init drbd_init(void)
if (err)
goto Enomem;
- drbd_proc = proc_create("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops);
+ drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
if (!drbd_proc) {
printk(KERN_ERR "drbd: unable to register proc file\n");
goto Enomem;
@@ -3660,7 +3763,8 @@ _drbd_fault_str(unsigned int type) {
[DRBD_FAULT_DT_RD] = "Data read",
[DRBD_FAULT_DT_RA] = "Data read ahead",
[DRBD_FAULT_BM_ALLOC] = "BM allocation",
- [DRBD_FAULT_AL_EE] = "EE allocation"
+ [DRBD_FAULT_AL_EE] = "EE allocation",
+ [DRBD_FAULT_RECEIVE] = "receive data corruption",
};
return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
@@ -3679,7 +3783,7 @@ _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
if (ret) {
fault_count++;
- if (printk_ratelimit())
+ if (__ratelimit(&drbd_ratelimit_state))
dev_warn(DEV, "***Simulating %s failure\n",
_drbd_fault_str(type));
}
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 6429d2b19e06..632e3245d1bb 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -510,7 +510,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
* Returns 0 on success, negative return values indicate errors.
* You should call drbd_md_sync() after calling this function.
*/
-enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force) __must_hold(local)
+enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
sector_t prev_first_sect, prev_size; /* previous meta location */
sector_t la_size;
@@ -541,12 +541,12 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force
/* TODO: should only be some assert here, not (re)init... */
drbd_md_set_sector_offsets(mdev, mdev->ldev);
- size = drbd_new_dev_size(mdev, mdev->ldev, force);
+ size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);
if (drbd_get_capacity(mdev->this_bdev) != size ||
drbd_bm_capacity(mdev) != size) {
int err;
- err = drbd_bm_resize(mdev, size);
+ err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
if (unlikely(err)) {
/* currently there is only one error: ENOMEM! */
size = drbd_bm_capacity(mdev)>>1;
@@ -704,9 +704,6 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
int max_segments = mdev->ldev->dc.max_bio_bvecs;
- if (b->merge_bvec_fn && !mdev->ldev->dc.use_bmbv)
- max_seg_s = PAGE_SIZE;
-
max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
blk_queue_max_hw_sectors(q, max_seg_s >> 9);
@@ -1199,13 +1196,12 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
}
/* allocation not in the IO path, cqueue thread context */
- new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
+ new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
if (!new_conf) {
retcode = ERR_NOMEM;
goto fail;
}
- memset(new_conf, 0, sizeof(struct net_conf));
new_conf->timeout = DRBD_TIMEOUT_DEF;
new_conf->try_connect_int = DRBD_CONNECT_INT_DEF;
new_conf->ping_int = DRBD_PING_INT_DEF;
@@ -1477,8 +1473,8 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
{
struct resize rs;
int retcode = NO_ERROR;
- int ldsc = 0; /* local disk size changed */
enum determine_dev_size dd;
+ enum dds_flags ddsf;
memset(&rs, 0, sizeof(struct resize));
if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
@@ -1502,13 +1498,17 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
goto fail;
}
- if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
- mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
- ldsc = 1;
+ if (rs.no_resync && mdev->agreed_pro_version < 93) {
+ retcode = ERR_NEED_APV_93;
+ goto fail;
}
+ if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
+ mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
+
mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
- dd = drbd_determin_dev_size(mdev, rs.resize_force);
+ ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
+ dd = drbd_determin_dev_size(mdev, ddsf);
drbd_md_sync(mdev);
put_ldev(mdev);
if (dd == dev_size_error) {
@@ -1516,12 +1516,12 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
goto fail;
}
- if (mdev->state.conn == C_CONNECTED && (dd != unchanged || ldsc)) {
+ if (mdev->state.conn == C_CONNECTED) {
if (dd == grew)
set_bit(RESIZE_PENDING, &mdev->flags);
drbd_send_uuids(mdev);
- drbd_send_sizes(mdev, 1);
+ drbd_send_sizes(mdev, 1, ddsf);
}
fail:
@@ -1551,6 +1551,10 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
sc.rate = DRBD_RATE_DEF;
sc.after = DRBD_AFTER_DEF;
sc.al_extents = DRBD_AL_EXTENTS_DEF;
+ sc.dp_volume = DRBD_DP_VOLUME_DEF;
+ sc.dp_interval = DRBD_DP_INTERVAL_DEF;
+ sc.throttle_th = DRBD_RS_THROTTLE_TH_DEF;
+ sc.hold_off_th = DRBD_RS_HOLD_OFF_TH_DEF;
} else
memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
@@ -2207,9 +2211,9 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
{
struct cn_msg *cn_reply;
struct drbd_nl_cfg_reply *reply;
- struct bio_vec *bvec;
unsigned short *tl;
- int i;
+ struct page *page;
+ unsigned len;
if (!e)
return;
@@ -2247,11 +2251,15 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
put_unaligned(T_ee_data, tl++);
put_unaligned(e->size, tl++);
- __bio_for_each_segment(bvec, e->private_bio, i, 0) {
- void *d = kmap(bvec->bv_page);
- memcpy(tl, d + bvec->bv_offset, bvec->bv_len);
- kunmap(bvec->bv_page);
- tl=(unsigned short*)((char*)tl + bvec->bv_len);
+ len = e->size;
+ page = e->pages;
+ page_chain_for_each(page) {
+ void *d = kmap_atomic(page, KM_USER0);
+ unsigned l = min_t(unsigned, len, PAGE_SIZE);
+ memcpy(tl, d, l);
+ kunmap_atomic(d, KM_USER0);
+ tl = (unsigned short*)((char*)tl + l);
+ len -= l;
}
put_unaligned(TT_END, tl++); /* Close the tag list */
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index be3374b68460..d0f1767ea4c3 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -73,14 +73,21 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
seq_printf(seq, "sync'ed:%3u.%u%% ", res / 10, res % 10);
/* if more than 1 GB display in MB */
if (mdev->rs_total > 0x100000L)
- seq_printf(seq, "(%lu/%lu)M\n\t",
+ seq_printf(seq, "(%lu/%lu)M",
(unsigned long) Bit2KB(rs_left >> 10),
(unsigned long) Bit2KB(mdev->rs_total >> 10));
else
- seq_printf(seq, "(%lu/%lu)K\n\t",
+ seq_printf(seq, "(%lu/%lu)K",
(unsigned long) Bit2KB(rs_left),
(unsigned long) Bit2KB(mdev->rs_total));
+ if (mdev->state.conn == C_SYNC_TARGET)
+ seq_printf(seq, " queue_delay: %d.%d ms\n\t",
+ mdev->data_delay / 1000,
+ (mdev->data_delay % 1000) / 100);
+ else if (mdev->state.conn == C_SYNC_SOURCE)
+ seq_printf(seq, " delay_probe: %u\n\t", mdev->delay_seq);
+
/* see drivers/md/md.c
* We do not want to overflow, so the order of operands and
* the * 100 / 100 trick are important. We do a +1 to be
@@ -128,6 +135,14 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
else
seq_printf(seq, " (%ld)", dbdt);
+ if (mdev->state.conn == C_SYNC_TARGET) {
+ if (mdev->c_sync_rate > 1000)
+ seq_printf(seq, " want: %d,%03d",
+ mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000);
+ else
+ seq_printf(seq, " want: %d", mdev->c_sync_rate);
+ }
+
seq_printf(seq, " K/sec\n");
}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 3f096e7959b4..dff48701b84d 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -42,7 +42,6 @@
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
-#include <linux/mm.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
@@ -80,30 +79,128 @@ static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epo
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
-static struct page *drbd_pp_first_page_or_try_alloc(struct drbd_conf *mdev)
+/*
+ * some helper functions to deal with single linked page lists,
+ * page->private being our "next" pointer.
+ */
+
+/* If at least n pages are linked at head, get n pages off.
+ * Otherwise, don't modify head, and return NULL.
+ * Locking is the responsibility of the caller.
+ */
+static struct page *page_chain_del(struct page **head, int n)
+{
+ struct page *page;
+ struct page *tmp;
+
+ BUG_ON(!n);
+ BUG_ON(!head);
+
+ page = *head;
+
+ if (!page)
+ return NULL;
+
+ while (page) {
+ tmp = page_chain_next(page);
+ if (--n == 0)
+ break; /* found sufficient pages */
+ if (tmp == NULL)
+ /* insufficient pages, don't use any of them. */
+ return NULL;
+ page = tmp;
+ }
+
+ /* add end of list marker for the returned list */
+ set_page_private(page, 0);
+ /* actual return value, and adjustment of head */
+ page = *head;
+ *head = tmp;
+ return page;
+}
+
+/* may be used outside of locks to find the tail of a (usually short)
+ * "private" page chain, before adding it back to a global chain head
+ * with page_chain_add() under a spinlock. */
+static struct page *page_chain_tail(struct page *page, int *len)
+{
+ struct page *tmp;
+ int i = 1;
+ while ((tmp = page_chain_next(page)))
+ ++i, page = tmp;
+ if (len)
+ *len = i;
+ return page;
+}
+
+static int page_chain_free(struct page *page)
+{
+ struct page *tmp;
+ int i = 0;
+ page_chain_for_each_safe(page, tmp) {
+ put_page(page);
+ ++i;
+ }
+ return i;
+}
+
+static void page_chain_add(struct page **head,
+ struct page *chain_first, struct page *chain_last)
+{
+#if 1
+ struct page *tmp;
+ tmp = page_chain_tail(chain_first, NULL);
+ BUG_ON(tmp != chain_last);
+#endif
+
+ /* add chain to head */
+ set_page_private(chain_last, (unsigned long)*head);
+ *head = chain_first;
+}
+
+static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
struct page *page = NULL;
+ struct page *tmp = NULL;
+ int i = 0;
/* Yes, testing drbd_pp_vacant outside the lock is racy.
* So what. It saves a spin_lock. */
- if (drbd_pp_vacant > 0) {
+ if (drbd_pp_vacant >= number) {
spin_lock(&drbd_pp_lock);
- page = drbd_pp_pool;
- if (page) {
- drbd_pp_pool = (struct page *)page_private(page);
- set_page_private(page, 0); /* just to be polite */
- drbd_pp_vacant--;
- }
+ page = page_chain_del(&drbd_pp_pool, number);
+ if (page)
+ drbd_pp_vacant -= number;
spin_unlock(&drbd_pp_lock);
+ if (page)
+ return page;
}
+
/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- if (!page)
- page = alloc_page(GFP_TRY);
- if (page)
- atomic_inc(&mdev->pp_in_use);
- return page;
+ for (i = 0; i < number; i++) {
+ tmp = alloc_page(GFP_TRY);
+ if (!tmp)
+ break;
+ set_page_private(tmp, (unsigned long)page);
+ page = tmp;
+ }
+
+ if (i == number)
+ return page;
+
+ /* Not enough pages immediately available this time.
+ * No need to jump around here, drbd_pp_alloc will retry this
+ * function "soon". */
+ if (page) {
+ tmp = page_chain_tail(page, NULL);
+ spin_lock(&drbd_pp_lock);
+ page_chain_add(&drbd_pp_pool, page, tmp);
+ drbd_pp_vacant += i;
+ spin_unlock(&drbd_pp_lock);
+ }
+ return NULL;
}
/* kick lower level device, if we have more than (arbitrary number)
@@ -127,7 +224,7 @@ static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed
list_for_each_safe(le, tle, &mdev->net_ee) {
e = list_entry(le, struct drbd_epoch_entry, w.list);
- if (drbd_bio_has_active_page(e->private_bio))
+ if (drbd_ee_has_active_page(e))
break;
list_move(le, to_be_freed);
}
@@ -148,32 +245,34 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
}
/**
- * drbd_pp_alloc() - Returns a page, fails only if a signal comes in
+ * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
* @mdev: DRBD device.
- * @retry: whether or not to retry allocation forever (or until signalled)
+ * @number: number of pages requested
+ * @retry: whether to retry, if not enough pages are available right now
+ *
+ * Tries to allocate number pages, first from our own page pool, then from
+ * the kernel, unless this allocation would exceed the max_buffers setting.
+ * Possibly retry until DRBD frees sufficient pages somewhere else.
*
- * Tries to allocate a page, first from our own page pool, then from the
- * kernel, unless this allocation would exceed the max_buffers setting.
- * If @retry is non-zero, retry until DRBD frees a page somewhere else.
+ * Returns a page chain linked via page->private.
*/
-static struct page *drbd_pp_alloc(struct drbd_conf *mdev, int retry)
+static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
struct page *page = NULL;
DEFINE_WAIT(wait);
- if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
- page = drbd_pp_first_page_or_try_alloc(mdev);
- if (page)
- return page;
- }
+ /* Yes, we may run up to @number over max_buffers. If we
+ * follow it strictly, the admin will get it wrong anyways. */
+ if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
+ page = drbd_pp_first_pages_or_try_alloc(mdev, number);
- for (;;) {
+ while (page == NULL) {
prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
drbd_kick_lo_and_reclaim_net(mdev);
if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
- page = drbd_pp_first_page_or_try_alloc(mdev);
+ page = drbd_pp_first_pages_or_try_alloc(mdev, number);
if (page)
break;
}
@@ -190,62 +289,32 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, int retry)
}
finish_wait(&drbd_pp_wait, &wait);
+ if (page)
+ atomic_add(number, &mdev->pp_in_use);
return page;
}
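The loop above is the usual wait-queue retry idiom: register on drbd_pp_wait, re-check the condition, sleep, and give up on a signal. Reduced to its shape (sketch; try_alloc() stands in for drbd_pp_first_pages_or_try_alloc()):

	DEFINE_WAIT(wait);
	void *obj = try_alloc();

	while (obj == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
		obj = try_alloc();		/* re-check after queueing ourselves */
		if (obj)
			break;
		if (signal_pending(current))
			break;			/* interrupted, let the caller cope */
		schedule();			/* sleep until woken by drbd_pp_free() */
	}
	finish_wait(&drbd_pp_wait, &wait);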
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
- * Is also used from inside an other spin_lock_irq(&mdev->req_lock) */
+ * Is also used from inside an other spin_lock_irq(&mdev->req_lock);
+ * Either links the page chain back to the global pool,
+ * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
{
- int free_it;
-
- spin_lock(&drbd_pp_lock);
- if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
- free_it = 1;
- } else {
- set_page_private(page, (unsigned long)drbd_pp_pool);
- drbd_pp_pool = page;
- drbd_pp_vacant++;
- free_it = 0;
- }
- spin_unlock(&drbd_pp_lock);
-
- atomic_dec(&mdev->pp_in_use);
-
- if (free_it)
- __free_page(page);
-
- wake_up(&drbd_pp_wait);
-}
-
-static void drbd_pp_free_bio_pages(struct drbd_conf *mdev, struct bio *bio)
-{
- struct page *p_to_be_freed = NULL;
- struct page *page;
- struct bio_vec *bvec;
int i;
-
- spin_lock(&drbd_pp_lock);
- __bio_for_each_segment(bvec, bio, i, 0) {
- if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
- set_page_private(bvec->bv_page, (unsigned long)p_to_be_freed);
- p_to_be_freed = bvec->bv_page;
- } else {
- set_page_private(bvec->bv_page, (unsigned long)drbd_pp_pool);
- drbd_pp_pool = bvec->bv_page;
- drbd_pp_vacant++;
- }
- }
- spin_unlock(&drbd_pp_lock);
- atomic_sub(bio->bi_vcnt, &mdev->pp_in_use);
-
- while (p_to_be_freed) {
- page = p_to_be_freed;
- p_to_be_freed = (struct page *)page_private(page);
- set_page_private(page, 0); /* just to be polite */
- put_page(page);
+ if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
+ i = page_chain_free(page);
+ else {
+ struct page *tmp;
+ tmp = page_chain_tail(page, &i);
+ spin_lock(&drbd_pp_lock);
+ page_chain_add(&drbd_pp_pool, page, tmp);
+ drbd_pp_vacant += i;
+ spin_unlock(&drbd_pp_lock);
}
-
+ atomic_sub(i, &mdev->pp_in_use);
+ i = atomic_read(&mdev->pp_in_use);
+ if (i < 0)
+ dev_warn(DEV, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
wake_up(&drbd_pp_wait);
}
@@ -270,11 +339,9 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
unsigned int data_size,
gfp_t gfp_mask) __must_hold(local)
{
- struct request_queue *q;
struct drbd_epoch_entry *e;
struct page *page;
- struct bio *bio;
- unsigned int ds;
+ unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
return NULL;
@@ -286,84 +353,32 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
return NULL;
}
- bio = bio_alloc(gfp_mask & ~__GFP_HIGHMEM, div_ceil(data_size, PAGE_SIZE));
- if (!bio) {
- if (!(gfp_mask & __GFP_NOWARN))
- dev_err(DEV, "alloc_ee: Allocation of a bio failed\n");
- goto fail1;
- }
-
- bio->bi_bdev = mdev->ldev->backing_bdev;
- bio->bi_sector = sector;
-
- ds = data_size;
- while (ds) {
- page = drbd_pp_alloc(mdev, (gfp_mask & __GFP_WAIT));
- if (!page) {
- if (!(gfp_mask & __GFP_NOWARN))
- dev_err(DEV, "alloc_ee: Allocation of a page failed\n");
- goto fail2;
- }
- if (!bio_add_page(bio, page, min_t(int, ds, PAGE_SIZE), 0)) {
- drbd_pp_free(mdev, page);
- dev_err(DEV, "alloc_ee: bio_add_page(s=%llu,"
- "data_size=%u,ds=%u) failed\n",
- (unsigned long long)sector, data_size, ds);
-
- q = bdev_get_queue(bio->bi_bdev);
- if (q->merge_bvec_fn) {
- struct bvec_merge_data bvm = {
- .bi_bdev = bio->bi_bdev,
- .bi_sector = bio->bi_sector,
- .bi_size = bio->bi_size,
- .bi_rw = bio->bi_rw,
- };
- int l = q->merge_bvec_fn(q, &bvm,
- &bio->bi_io_vec[bio->bi_vcnt]);
- dev_err(DEV, "merge_bvec_fn() = %d\n", l);
- }
-
- /* dump more of the bio. */
- dev_err(DEV, "bio->bi_max_vecs = %d\n", bio->bi_max_vecs);
- dev_err(DEV, "bio->bi_vcnt = %d\n", bio->bi_vcnt);
- dev_err(DEV, "bio->bi_size = %d\n", bio->bi_size);
- dev_err(DEV, "bio->bi_phys_segments = %d\n", bio->bi_phys_segments);
-
- goto fail2;
- break;
- }
- ds -= min_t(int, ds, PAGE_SIZE);
- }
-
- D_ASSERT(data_size == bio->bi_size);
-
- bio->bi_private = e;
- e->mdev = mdev;
- e->sector = sector;
- e->size = bio->bi_size;
+ page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
+ if (!page)
+ goto fail;
- e->private_bio = bio;
- e->block_id = id;
INIT_HLIST_NODE(&e->colision);
e->epoch = NULL;
+ e->mdev = mdev;
+ e->pages = page;
+ atomic_set(&e->pending_bios, 0);
+ e->size = data_size;
e->flags = 0;
+ e->sector = sector;
+ e->block_id = id;
return e;
- fail2:
- drbd_pp_free_bio_pages(mdev, bio);
- bio_put(bio);
- fail1:
+ fail:
mempool_free(e, drbd_ee_mempool);
-
return NULL;
}
void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
- struct bio *bio = e->private_bio;
- drbd_pp_free_bio_pages(mdev, bio);
- bio_put(bio);
+ drbd_pp_free(mdev, e->pages);
+ D_ASSERT(atomic_read(&e->pending_bios) == 0);
D_ASSERT(hlist_unhashed(&e->colision));
mempool_free(e, drbd_ee_mempool);
}
@@ -555,6 +570,25 @@ static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
return rv;
}
+/* quoting tcp(7):
+ * On individual connections, the socket buffer size must be set prior to the
+ * listen(2) or connect(2) calls in order to have it take effect.
+ * This is our wrapper to do so.
+ */
+static void drbd_setbufsize(struct socket *sock, unsigned int snd,
+ unsigned int rcv)
+{
+ /* open coded SO_SNDBUF, SO_RCVBUF */
+ if (snd) {
+ sock->sk->sk_sndbuf = snd;
+ sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+ }
+ if (rcv) {
+ sock->sk->sk_rcvbuf = rcv;
+ sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
+ }
+}
+
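As the tcp(7) quote says, buffer sizes only stick if they are applied before connect()/listen(); the helper writes sk_sndbuf/sk_rcvbuf directly and sets the *_LOCK bits so TCP autotuning does not override them. The userspace equivalent would be (sketch; error handling and setup of fd/addr omitted):

	int snd = 128 * 1024, rcv = 128 * 1024;	/* example sizes */
	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, sizeof(snd));
	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof(rcv));
	connect(fd, (struct sockaddr *)&addr, sizeof(addr));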
static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
const char *what;
@@ -576,6 +610,8 @@ static struct socket *drbd_try_connect(struct drbd_conf *mdev)
sock->sk->sk_rcvtimeo =
sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
+ drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
+ mdev->net_conf->rcvbuf_size);
/* explicitly bind to the configured IP as source IP
* for the outgoing connections.
@@ -654,6 +690,8 @@ static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
s_listen->sk->sk_rcvtimeo = timeo;
s_listen->sk->sk_sndtimeo = timeo;
+ drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
+ mdev->net_conf->rcvbuf_size);
what = "bind before listen";
err = s_listen->ops->bind(s_listen,
@@ -840,16 +878,6 @@ retry:
sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
- if (mdev->net_conf->sndbuf_size) {
- sock->sk->sk_sndbuf = mdev->net_conf->sndbuf_size;
- sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
- }
-
- if (mdev->net_conf->rcvbuf_size) {
- sock->sk->sk_rcvbuf = mdev->net_conf->rcvbuf_size;
- sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
- }
-
/* NOT YET ...
* sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
* sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
@@ -902,7 +930,7 @@ retry:
if (!drbd_send_protocol(mdev))
return -1;
drbd_send_sync_param(mdev, &mdev->sync_conf);
- drbd_send_sizes(mdev, 0);
+ drbd_send_sizes(mdev, 0, 0);
drbd_send_uuids(mdev);
drbd_send_state(mdev);
clear_bit(USE_DEGR_WFC_T, &mdev->flags);
@@ -946,7 +974,8 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d
int rv;
if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
- rv = blkdev_issue_flush(mdev->ldev->backing_bdev, NULL);
+ rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
+ NULL, BLKDEV_IFL_WAIT);
if (rv) {
dev_err(DEV, "local disk flush failed with status %d\n", rv);
/* would rather check on EOPNOTSUPP, but that is not reliable.
@@ -1120,6 +1149,90 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
}
/**
+ * drbd_submit_ee()
+ * @mdev: DRBD device.
+ * @e: epoch entry
+ * @rw: flag field, see bio->bi_rw
+ */
+/* TODO allocate from our own bio_set. */
+int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
+ const unsigned rw, const int fault_type)
+{
+ struct bio *bios = NULL;
+ struct bio *bio;
+ struct page *page = e->pages;
+ sector_t sector = e->sector;
+ unsigned ds = e->size;
+ unsigned n_bios = 0;
+ unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
+
+ /* In most cases, we will only need one bio. But in case the lower
+ * level restrictions happen to be different at this offset on this
+ * side than those of the sending peer, we may need to submit the
+ * request in more than one bio. */
+next_bio:
+ bio = bio_alloc(GFP_NOIO, nr_pages);
+ if (!bio) {
+ dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
+ goto fail;
+ }
+ /* > e->sector, unless this is the first bio */
+ bio->bi_sector = sector;
+ bio->bi_bdev = mdev->ldev->backing_bdev;
+ /* we special case some flags in the multi-bio case, see below
+ * (BIO_RW_UNPLUG, BIO_RW_BARRIER) */
+ bio->bi_rw = rw;
+ bio->bi_private = e;
+ bio->bi_end_io = drbd_endio_sec;
+
+ bio->bi_next = bios;
+ bios = bio;
+ ++n_bios;
+
+ page_chain_for_each(page) {
+ unsigned len = min_t(unsigned, ds, PAGE_SIZE);
+ if (!bio_add_page(bio, page, len, 0)) {
+ /* a single page must always be possible! */
+ BUG_ON(bio->bi_vcnt == 0);
+ goto next_bio;
+ }
+ ds -= len;
+ sector += len >> 9;
+ --nr_pages;
+ }
+ D_ASSERT(page == NULL);
+ D_ASSERT(ds == 0);
+
+ atomic_set(&e->pending_bios, n_bios);
+ do {
+ bio = bios;
+ bios = bios->bi_next;
+ bio->bi_next = NULL;
+
+ /* strip off BIO_RW_UNPLUG unless it is the last bio */
+ if (bios)
+ bio->bi_rw &= ~(1<<BIO_RW_UNPLUG);
+
+ drbd_generic_make_request(mdev, fault_type, bio);
+
+ /* strip off BIO_RW_BARRIER,
+ * unless it is the first or last bio */
+ if (bios && bios->bi_next)
+ bios->bi_rw &= ~(1<<BIO_RW_BARRIER);
+ } while (bios);
+ maybe_kick_lo(mdev);
+ return 0;
+
+fail:
+ while (bios) {
+ bio = bios;
+ bios = bios->bi_next;
+ bio_put(bio);
+ }
+ return -ENOMEM;
+}
+
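drbd_submit_ee() either gets every bio on its way and returns 0, or returns -ENOMEM and leaves the epoch entry untouched, so the caller chooses between freeing the entry and requeueing it. Condensed from the recv_resync_read hunk further down:

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
		return TRUE;

	drbd_free_ee(mdev, e);		/* no bios available, give up for now */
	put_ldev(mdev);
	return FALSE;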
+/**
* w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
* @mdev: DRBD device.
* @w: work object.
@@ -1128,8 +1241,6 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
{
struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
- struct bio *bio = e->private_bio;
-
/* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
(and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
so that we can finish that epoch in drbd_may_finish_epoch().
@@ -1143,33 +1254,17 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea
if (previous_epoch(mdev, e->epoch))
dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
- /* prepare bio for re-submit,
- * re-init volatile members */
/* we still have a local reference,
* get_ldev was done in receive_Data. */
- bio->bi_bdev = mdev->ldev->backing_bdev;
- bio->bi_sector = e->sector;
- bio->bi_size = e->size;
- bio->bi_idx = 0;
-
- bio->bi_flags &= ~(BIO_POOL_MASK - 1);
- bio->bi_flags |= 1 << BIO_UPTODATE;
-
- /* don't know whether this is necessary: */
- bio->bi_phys_segments = 0;
- bio->bi_next = NULL;
-
- /* these should be unchanged: */
- /* bio->bi_end_io = drbd_endio_write_sec; */
- /* bio->bi_vcnt = whatever; */
e->w.cb = e_end_block;
-
- /* This is no longer a barrier request. */
- bio->bi_rw &= ~(1UL << BIO_RW_BARRIER);
-
- drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, bio);
-
+ if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
+ /* drbd_submit_ee fails for one reason only:
+ * it was not able to allocate sufficient bios.
+ * requeue, try again later. */
+ e->w.cb = w_e_reissue;
+ drbd_queue_work(&mdev->data.work, &e->w);
+ }
return 1;
}
@@ -1261,13 +1356,13 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
+ const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
struct drbd_epoch_entry *e;
- struct bio_vec *bvec;
struct page *page;
- struct bio *bio;
- int dgs, ds, i, rr;
+ int dgs, ds, rr;
void *dig_in = mdev->int_dig_in;
void *dig_vv = mdev->int_dig_vv;
+ unsigned long *data;
dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
@@ -1286,29 +1381,44 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
ERR_IF(data_size & 0x1ff) return NULL;
ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;
+ /* even though we trust our peer,
+ * we sometimes have to double check. */
+ if (sector + (data_size>>9) > capacity) {
+ dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
+ (unsigned long long)capacity,
+ (unsigned long long)sector, data_size);
+ return NULL;
+ }
+
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
if (!e)
return NULL;
- bio = e->private_bio;
+
ds = data_size;
- bio_for_each_segment(bvec, bio, i) {
- page = bvec->bv_page;
- rr = drbd_recv(mdev, kmap(page), min_t(int, ds, PAGE_SIZE));
+ page = e->pages;
+ page_chain_for_each(page) {
+ unsigned len = min_t(int, ds, PAGE_SIZE);
+ data = kmap(page);
+ rr = drbd_recv(mdev, data, len);
+ if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
+ dev_err(DEV, "Fault injection: Corrupting data on receive\n");
+ data[0] = data[0] ^ (unsigned long)-1;
+ }
kunmap(page);
- if (rr != min_t(int, ds, PAGE_SIZE)) {
+ if (rr != len) {
drbd_free_ee(mdev, e);
dev_warn(DEV, "short read receiving data: read %d expected %d\n",
- rr, min_t(int, ds, PAGE_SIZE));
+ rr, len);
return NULL;
}
ds -= rr;
}
if (dgs) {
- drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
+ drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
dev_err(DEV, "Digest integrity check FAILED.\n");
drbd_bcast_ee(mdev, "digest failed",
@@ -1330,7 +1440,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
int rr, rv = 1;
void *data;
- page = drbd_pp_alloc(mdev, 1);
+ if (!data_size)
+ return TRUE;
+
+ page = drbd_pp_alloc(mdev, 1, 1);
data = kmap(page);
while (data_size) {
@@ -1394,7 +1507,7 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
}
if (dgs) {
- drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
+ drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
return 0;
@@ -1415,7 +1528,7 @@ static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int u
D_ASSERT(hlist_unhashed(&e->colision));
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
drbd_set_in_sync(mdev, sector, e->size);
ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
} else {
@@ -1434,30 +1547,28 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
struct drbd_epoch_entry *e;
e = read_in_block(mdev, ID_SYNCER, sector, data_size);
- if (!e) {
- put_ldev(mdev);
- return FALSE;
- }
+ if (!e)
+ goto fail;
dec_rs_pending(mdev);
- e->private_bio->bi_end_io = drbd_endio_write_sec;
- e->private_bio->bi_rw = WRITE;
- e->w.cb = e_end_resync_block;
-
inc_unacked(mdev);
/* corresponding dec_unacked() in e_end_resync_block()
* respective _drbd_clear_done_ee */
+ e->w.cb = e_end_resync_block;
+
spin_lock_irq(&mdev->req_lock);
list_add(&e->w.list, &mdev->sync_ee);
spin_unlock_irq(&mdev->req_lock);
- drbd_generic_make_request(mdev, DRBD_FAULT_RS_WR, e->private_bio);
- /* accounting done in endio */
+ if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
+ return TRUE;
- maybe_kick_lo(mdev);
- return TRUE;
+ drbd_free_ee(mdev, e);
+fail:
+ put_ldev(mdev);
+ return FALSE;
}
static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h)
@@ -1552,7 +1663,7 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
}
if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
mdev->state.conn <= C_PAUSED_SYNC_T &&
e->flags & EE_MAY_SET_IN_SYNC) ?
@@ -1698,7 +1809,6 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
return FALSE;
}
- e->private_bio->bi_end_io = drbd_endio_write_sec;
e->w.cb = e_end_block;
spin_lock(&mdev->epoch_lock);
@@ -1894,12 +2004,8 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
drbd_al_begin_io(mdev, e->sector);
}
- e->private_bio->bi_rw = rw;
- drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, e->private_bio);
- /* accounting done in endio */
-
- maybe_kick_lo(mdev);
- return TRUE;
+ if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
+ return TRUE;
out_interrupted:
/* yes, the epoch_size now is imbalanced.
@@ -1945,7 +2051,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
"no local data.\n");
drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY :
P_NEG_RS_DREPLY , p);
- return TRUE;
+ return drbd_drain_block(mdev, h->length - brps);
}
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
@@ -1957,9 +2063,6 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
return FALSE;
}
- e->private_bio->bi_rw = READ;
- e->private_bio->bi_end_io = drbd_endio_read_sec;
-
switch (h->command) {
case P_DATA_REQUEST:
e->w.cb = w_e_end_data_req;
@@ -2053,10 +2156,8 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
inc_unacked(mdev);
- drbd_generic_make_request(mdev, fault_type, e->private_bio);
- maybe_kick_lo(mdev);
-
- return TRUE;
+ if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
+ return TRUE;
out_free_e:
kfree(di);
@@ -2473,6 +2574,9 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
hg > 0 ? "source" : "target");
}
+ if (abs(hg) == 100)
+ drbd_khelper(mdev, "initial-split-brain");
+
if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
int pcount = (mdev->state.role == R_PRIMARY)
+ (peer_role == R_PRIMARY);
@@ -2518,7 +2622,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
* after an attempted attach on a diskless node.
* We just refuse to attach -- well, we drop the "connection"
* to that disk, in a way... */
- dev_alert(DEV, "Split-Brain detected, dropping connection!\n");
+ dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
drbd_khelper(mdev, "split-brain");
return C_MASK;
}
@@ -2849,7 +2953,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
unsigned int max_seg_s;
sector_t p_size, p_usize, my_usize;
int ldsc = 0; /* local disk size changed */
- enum drbd_conns nconn;
+ enum dds_flags ddsf;
ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
if (drbd_recv(mdev, h->payload, h->length) != h->length)
@@ -2905,8 +3009,9 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
}
#undef min_not_zero
+ ddsf = be16_to_cpu(p->dds_flags);
if (get_ldev(mdev)) {
- dd = drbd_determin_dev_size(mdev, 0);
+ dd = drbd_determin_dev_size(mdev, ddsf);
put_ldev(mdev);
if (dd == dev_size_error)
return FALSE;
@@ -2916,33 +3021,21 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
drbd_set_my_capacity(mdev, p_size);
}
- if (mdev->p_uuid && mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
- nconn = drbd_sync_handshake(mdev,
- mdev->state.peer, mdev->state.pdsk);
- put_ldev(mdev);
-
- if (nconn == C_MASK) {
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return FALSE;
- }
-
- if (drbd_request_state(mdev, NS(conn, nconn)) < SS_SUCCESS) {
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return FALSE;
- }
- }
-
if (get_ldev(mdev)) {
if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
ldsc = 1;
}
- max_seg_s = be32_to_cpu(p->max_segment_size);
+ if (mdev->agreed_pro_version < 94)
+ max_seg_s = be32_to_cpu(p->max_segment_size);
+ else /* drbd 8.3.8 onwards */
+ max_seg_s = DRBD_MAX_SEGMENT_SIZE;
+
if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
drbd_setup_queue_param(mdev, max_seg_s);
- drbd_setup_order_type(mdev, be32_to_cpu(p->queue_order_type));
+ drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
put_ldev(mdev);
}
@@ -2951,14 +3044,17 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
drbd_get_capacity(mdev->this_bdev) || ldsc) {
/* we have different sizes, probably peer
* needs to know my new size... */
- drbd_send_sizes(mdev, 0);
+ drbd_send_sizes(mdev, 0, ddsf);
}
if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
(dd == grew && mdev->state.conn == C_CONNECTED)) {
if (mdev->state.pdsk >= D_INCONSISTENT &&
- mdev->state.disk >= D_INCONSISTENT)
- resync_after_online_grow(mdev);
- else
+ mdev->state.disk >= D_INCONSISTENT) {
+ if (ddsf & DDSF_NO_RESYNC)
+ dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
+ else
+ resync_after_online_grow(mdev);
+ } else
set_bit(RESYNC_AFTER_NEG, &mdev->flags);
}
}
@@ -3490,6 +3586,92 @@ static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
return TRUE;
}
+static void timeval_sub_us(struct timeval* tv, unsigned int us)
+{
+ tv->tv_sec -= us / 1000000;
+ us = us % 1000000;
+ if (tv->tv_usec > us) {
+ tv->tv_usec += 1000000;
+ tv->tv_sec--;
+ }
+ tv->tv_usec -= us;
+}
+
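timeval_sub_us() rewinds a timestamp by the peer-reported offset, borrowing a whole second when the microsecond field would underflow. For reference, a normalized subtraction is commonly written like this (sketch, hypothetical name, tv_usec assumed to be in [0, 1000000)):

	static void tv_sub_us(struct timeval *tv, unsigned int us)
	{
		tv->tv_sec -= us / 1000000;
		us %= 1000000;
		if ((unsigned int)tv->tv_usec < us) {
			/* not enough microseconds left: borrow one second */
			tv->tv_usec += 1000000;
			tv->tv_sec--;
		}
		tv->tv_usec -= us;
	}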
+static void got_delay_probe(struct drbd_conf *mdev, int from, struct p_delay_probe *p)
+{
+ struct delay_probe *dp;
+ struct list_head *le;
+ struct timeval now;
+ int seq_num;
+ int offset;
+ int data_delay;
+
+ seq_num = be32_to_cpu(p->seq_num);
+ offset = be32_to_cpu(p->offset);
+
+ spin_lock(&mdev->peer_seq_lock);
+ if (!list_empty(&mdev->delay_probes)) {
+ if (from == USE_DATA_SOCKET)
+ le = mdev->delay_probes.next;
+ else
+ le = mdev->delay_probes.prev;
+
+ dp = list_entry(le, struct delay_probe, list);
+
+ if (dp->seq_num == seq_num) {
+ list_del(le);
+ spin_unlock(&mdev->peer_seq_lock);
+ do_gettimeofday(&now);
+ timeval_sub_us(&now, offset);
+ data_delay =
+ now.tv_usec - dp->time.tv_usec +
+ (now.tv_sec - dp->time.tv_sec) * 1000000;
+
+ if (data_delay > 0)
+ mdev->data_delay = data_delay;
+
+ kfree(dp);
+ return;
+ }
+
+ if (dp->seq_num > seq_num) {
+ spin_unlock(&mdev->peer_seq_lock);
+ dev_warn(DEV, "Previous allocation failure of struct delay_probe?\n");
+ return; /* Do not allocate a struct delay_probe... */
+ }
+ }
+ spin_unlock(&mdev->peer_seq_lock);
+
+ dp = kmalloc(sizeof(struct delay_probe), GFP_NOIO);
+ if (!dp) {
+ dev_warn(DEV, "Failed to allocate a struct delay_probe, do not worry.\n");
+ return;
+ }
+
+ dp->seq_num = seq_num;
+ do_gettimeofday(&dp->time);
+ timeval_sub_us(&dp->time, offset);
+
+ spin_lock(&mdev->peer_seq_lock);
+ if (from == USE_DATA_SOCKET)
+ list_add(&dp->list, &mdev->delay_probes);
+ else
+ list_add_tail(&dp->list, &mdev->delay_probes);
+ spin_unlock(&mdev->peer_seq_lock);
+}
+
+static int receive_delay_probe(struct drbd_conf *mdev, struct p_header *h)
+{
+ struct p_delay_probe *p = (struct p_delay_probe *)h;
+
+ ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
+ if (drbd_recv(mdev, h->payload, h->length) != h->length)
+ return FALSE;
+
+ got_delay_probe(mdev, USE_DATA_SOCKET, p);
+ return TRUE;
+}
+
typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, struct p_header *);
static drbd_cmd_handler_f drbd_default_handler[] = {
@@ -3513,6 +3695,7 @@ static drbd_cmd_handler_f drbd_default_handler[] = {
[P_OV_REQUEST] = receive_DataRequest,
[P_OV_REPLY] = receive_DataRequest,
[P_CSUM_RS_REQUEST] = receive_DataRequest,
+ [P_DELAY_PROBE] = receive_delay_probe,
/* anything missing from this table is in
* the asender_tbl, see get_asender_cmd */
[P_MAX_CMD] = NULL,
@@ -3739,7 +3922,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
i = atomic_read(&mdev->pp_in_use);
if (i)
- dev_info(DEV, "pp_in_use = %u, expected 0\n", i);
+ dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
D_ASSERT(list_empty(&mdev->read_ee));
D_ASSERT(list_empty(&mdev->active_ee));
@@ -4232,7 +4415,6 @@ static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h)
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
- D_ASSERT(p->block_id == ID_SYNCER);
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
@@ -4290,6 +4472,14 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
return TRUE;
}
+static int got_delay_probe_m(struct drbd_conf *mdev, struct p_header *h)
+{
+ struct p_delay_probe *p = (struct p_delay_probe *)h;
+
+ got_delay_probe(mdev, USE_META_SOCKET, p);
+ return TRUE;
+}
+
struct asender_cmd {
size_t pkt_size;
int (*process)(struct drbd_conf *mdev, struct p_header *h);
@@ -4314,6 +4504,7 @@ static struct asender_cmd *get_asender_cmd(int cmd)
[P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
[P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
+ [P_DELAY_PROBE] = { sizeof(struct p_delay_probe), got_delay_probe_m },
[P_MAX_CMD] = { 0, NULL },
};
if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index de81ab7b4627..654f1ef5cbb0 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -102,32 +102,7 @@ static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const
}
}
- /* if it was a local io error, we want to notify our
- * peer about that, and see if we need to
- * detach the disk and stuff.
- * to avoid allocating some special work
- * struct, reuse the request. */
-
- /* THINK
- * why do we do this not when we detect the error,
- * but delay it until it is "done", i.e. possibly
- * until the next barrier ack? */
-
- if (rw == WRITE &&
- ((s & RQ_LOCAL_MASK) && !(s & RQ_LOCAL_OK))) {
- if (!(req->w.list.next == LIST_POISON1 ||
- list_empty(&req->w.list))) {
- /* DEBUG ASSERT only; if this triggers, we
- * probably corrupt the worker list here */
- dev_err(DEV, "req->w.list.next = %p\n", req->w.list.next);
- dev_err(DEV, "req->w.list.prev = %p\n", req->w.list.prev);
- }
- req->w.cb = w_io_error;
- drbd_queue_work(&mdev->data.work, &req->w);
- /* drbd_req_free() is done in w_io_error */
- } else {
- drbd_req_free(req);
- }
+ drbd_req_free(req);
}
static void queue_barrier(struct drbd_conf *mdev)
@@ -453,9 +428,6 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
- dev_alert(DEV, "Local WRITE failed sec=%llus size=%u\n",
- (unsigned long long)req->sector, req->size);
- /* and now: check how to handle local io error. */
__drbd_chk_io_error(mdev, FALSE);
_req_may_be_done(req, m);
put_ldev(mdev);
@@ -475,22 +447,21 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
- dev_alert(DEV, "Local READ failed sec=%llus size=%u\n",
- (unsigned long long)req->sector, req->size);
- /* _req_mod(req,to_be_send); oops, recursion... */
D_ASSERT(!(req->rq_state & RQ_NET_MASK));
- req->rq_state |= RQ_NET_PENDING;
- inc_ap_pending(mdev);
__drbd_chk_io_error(mdev, FALSE);
put_ldev(mdev);
- /* NOTE: if we have no connection,
- * or know the peer has no good data either,
- * then we don't actually need to "queue_for_net_read",
- * but we do so anyways, since the drbd_io_error()
- * and the potential state change to "Diskless"
- * needs to be done from process context */
+ /* no point in retrying if there is no good remote data,
+ * or we have no connection. */
+ if (mdev->state.pdsk != D_UP_TO_DATE) {
+ _req_may_be_done(req, m);
+ break;
+ }
+
+ /* _req_mod(req,to_be_send); oops, recursion... */
+ req->rq_state |= RQ_NET_PENDING;
+ inc_ap_pending(mdev);
/* fall through: _req_mod(req,queue_for_net_read); */
case queue_for_net_read:
@@ -600,6 +571,9 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
_req_may_be_done(req, m);
break;
+ case read_retry_remote_canceled:
+ req->rq_state &= ~RQ_NET_QUEUED;
+ /* fall through, in case we raced with drbd_disconnect */
case connection_lost_while_pending:
/* transfer log cleanup after connection loss */
/* assert something? */
@@ -722,6 +696,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
struct drbd_request *req;
int local, remote;
int err = -EIO;
+ int ret = 0;
/* allocate outside of all locks; */
req = drbd_req_new(mdev, bio);
@@ -784,7 +759,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
(mdev->state.pdsk == D_INCONSISTENT &&
mdev->state.conn >= C_CONNECTED));
- if (!(local || remote)) {
+ if (!(local || remote) && !mdev->state.susp) {
dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
goto fail_free_complete;
}
@@ -810,6 +785,16 @@ allocate_barrier:
/* GOOD, everything prepared, grab the spin_lock */
spin_lock_irq(&mdev->req_lock);
+ if (mdev->state.susp) {
+ /* If we got suspended, use the retry mechanism of
+ generic_make_request() to restart processing of this
+ bio. In the next call to drbd_make_request_26
+ we sleep in inc_ap_bio() */
+ ret = 1;
+ spin_unlock_irq(&mdev->req_lock);
+ goto fail_free_complete;
+ }
+
if (remote) {
remote = (mdev->state.pdsk == D_UP_TO_DATE ||
(mdev->state.pdsk == D_INCONSISTENT &&
@@ -947,12 +932,14 @@ fail_and_free_req:
req->private_bio = NULL;
put_ldev(mdev);
}
- bio_endio(bio, err);
+ if (!ret)
+ bio_endio(bio, err);
+
drbd_req_free(req);
dec_ap_bio(mdev);
kfree(b);
- return 0;
+ return ret;
}
/* helper function for drbd_make_request
@@ -962,11 +949,6 @@ fail_and_free_req:
*/
static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
{
- /* Unconfigured */
- if (mdev->state.conn == C_DISCONNECTING &&
- mdev->state.disk == D_DISKLESS)
- return 1;
-
if (mdev->state.role != R_PRIMARY &&
(!allow_oos || is_write)) {
if (__ratelimit(&drbd_ratelimit_state)) {
@@ -1070,15 +1052,21 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
/* we need to get a "reference count" (ap_bio_cnt)
* to avoid races with the disconnect/reconnect/suspend code.
- * In case we need to split the bio here, we need to get two references
+ * In case we need to split the bio here, we need to get three references
* atomically, otherwise we might deadlock when trying to submit the
* second one! */
- inc_ap_bio(mdev, 2);
+ inc_ap_bio(mdev, 3);
D_ASSERT(e_enr == s_enr + 1);
- drbd_make_request_common(mdev, &bp->bio1);
- drbd_make_request_common(mdev, &bp->bio2);
+ while (drbd_make_request_common(mdev, &bp->bio1))
+ inc_ap_bio(mdev, 1);
+
+ while (drbd_make_request_common(mdev, &bp->bio2))
+ inc_ap_bio(mdev, 1);
+
+ dec_ap_bio(mdev);
+
bio_pair_release(bp);
}
return 0;
@@ -1115,7 +1103,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
} else if (limit && get_ldev(mdev)) {
struct request_queue * const b =
mdev->ldev->backing_bdev->bd_disk->queue;
- if (b->merge_bvec_fn && mdev->ldev->dc.use_bmbv) {
+ if (b->merge_bvec_fn) {
backing_limit = b->merge_bvec_fn(b, bvm, bvec);
limit = min(limit, backing_limit);
}
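
A note on the suspend handling introduced above: drbd_make_request_common() now returns nonzero instead of completing the bio when the device is suspended, and the split-bio path therefore takes three ap_bio references up front and re-takes one per retry. Below is a minimal stand-alone C sketch of that reference accounting, with purely illustrative names and no kernel dependencies:

#include <stdio.h>

static int ap_bio_cnt;            /* stands in for mdev->ap_bio_cnt */
static int suspend_left = 2;      /* pretend the device is suspended
                                   * for the first two submissions  */

static void inc_ap_bio(int n) { ap_bio_cnt += n; }
static void dec_ap_bio(void)  { ap_bio_cnt -= 1; }

/* Like drbd_make_request_common() after this patch: on the suspend path it
 * gives its reference back and returns 1 so the caller retries; once the
 * bio is queued it keeps the reference until completion and returns 0. */
static int make_request(const char *half)
{
	if (suspend_left > 0) {
		suspend_left--;
		dec_ap_bio();
		printf("%s: suspended, caller must retry\n", half);
		return 1;
	}
	printf("%s: queued\n", half);
	return 0;
}

int main(void)
{
	inc_ap_bio(3);                 /* two halves plus one spare */

	while (make_request("bio1"))
		inc_ap_bio(1);         /* re-take the reference the retry gave back */
	while (make_request("bio2"))
		inc_ap_bio(1);

	dec_ap_bio();                  /* drop the spare */
	printf("references held by queued bios: %d\n", ap_bio_cnt);
	return 0;
}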
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 16119d7056cc..02d575d24518 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -91,6 +91,7 @@ enum drbd_req_event {
send_failed,
handed_over_to_network,
connection_lost_while_pending,
+ read_retry_remote_canceled,
recv_acked_by_peer,
write_acked_by_peer,
write_acked_by_peer_and_sis, /* and set_in_sync */
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
index 76863e3f05be..85179e1fb50a 100644
--- a/drivers/block/drbd/drbd_strings.c
+++ b/drivers/block/drbd/drbd_strings.c
@@ -70,7 +70,7 @@ static const char *drbd_disk_s_names[] = {
static const char *drbd_state_sw_errors[] = {
[-SS_TWO_PRIMARIES] = "Multiple primaries not allowed by config",
- [-SS_NO_UP_TO_DATE_DISK] = "Refusing to be Primary without at least one UpToDate disk",
+ [-SS_NO_UP_TO_DATE_DISK] = "Need access to UpToDate data",
[-SS_NO_LOCAL_DISK] = "Can not resync without local disk",
[-SS_NO_REMOTE_DISK] = "Can not resync without remote disk",
[-SS_CONNECTED_OUTDATES] = "Refusing to be Outdated while Connected",
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index d48a1dfd7b24..b623ceee2a4a 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -47,8 +47,7 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
/* defined here:
drbd_md_io_complete
- drbd_endio_write_sec
- drbd_endio_read_sec
+ drbd_endio_sec
drbd_endio_pri
* more endio handlers:
@@ -85,27 +84,10 @@ void drbd_md_io_complete(struct bio *bio, int error)
/* reads on behalf of the partner,
* "submitted" by the receiver
*/
-void drbd_endio_read_sec(struct bio *bio, int error) __releases(local)
+void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
{
unsigned long flags = 0;
- struct drbd_epoch_entry *e = NULL;
- struct drbd_conf *mdev;
- int uptodate = bio_flagged(bio, BIO_UPTODATE);
-
- e = bio->bi_private;
- mdev = e->mdev;
-
- if (error)
- dev_warn(DEV, "read: error=%d s=%llus\n", error,
- (unsigned long long)e->sector);
- if (!error && !uptodate) {
- dev_warn(DEV, "read: setting error to -EIO s=%llus\n",
- (unsigned long long)e->sector);
- /* strange behavior of some lower level drivers...
- * fail the request by clearing the uptodate flag,
- * but do not return any error?! */
- error = -EIO;
- }
+ struct drbd_conf *mdev = e->mdev;
D_ASSERT(e->block_id != ID_VACANT);
@@ -114,49 +96,38 @@ void drbd_endio_read_sec(struct bio *bio, int error) __releases(local)
list_del(&e->w.list);
if (list_empty(&mdev->read_ee))
wake_up(&mdev->ee_wait);
+ if (test_bit(__EE_WAS_ERROR, &e->flags))
+ __drbd_chk_io_error(mdev, FALSE);
spin_unlock_irqrestore(&mdev->req_lock, flags);
- drbd_chk_io_error(mdev, error, FALSE);
drbd_queue_work(&mdev->data.work, &e->w);
put_ldev(mdev);
}
+static int is_failed_barrier(int ee_flags)
+{
+ return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED))
+ == (EE_IS_BARRIER|EE_WAS_ERROR);
+}
+
/* writes on behalf of the partner, or resync writes,
- * "submitted" by the receiver.
- */
-void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
+ * "submitted" by the receiver, final stage. */
+static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
{
unsigned long flags = 0;
- struct drbd_epoch_entry *e = NULL;
- struct drbd_conf *mdev;
+ struct drbd_conf *mdev = e->mdev;
sector_t e_sector;
int do_wake;
int is_syncer_req;
int do_al_complete_io;
- int uptodate = bio_flagged(bio, BIO_UPTODATE);
- int is_barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
-
- e = bio->bi_private;
- mdev = e->mdev;
- if (error)
- dev_warn(DEV, "write: error=%d s=%llus\n", error,
- (unsigned long long)e->sector);
- if (!error && !uptodate) {
- dev_warn(DEV, "write: setting error to -EIO s=%llus\n",
- (unsigned long long)e->sector);
- /* strange behavior of some lower level drivers...
- * fail the request by clearing the uptodate flag,
- * but do not return any error?! */
- error = -EIO;
- }
-
- /* error == -ENOTSUPP would be a better test,
- * alas it is not reliable */
- if (error && is_barrier && e->flags & EE_IS_BARRIER) {
+ /* if this is a failed barrier request, disable use of barriers,
+ * and schedule for resubmission */
+ if (is_failed_barrier(e->flags)) {
drbd_bump_write_ordering(mdev, WO_bdev_flush);
spin_lock_irqsave(&mdev->req_lock, flags);
list_del(&e->w.list);
+ e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED;
e->w.cb = w_e_reissue;
/* put_ldev actually happens below, once we come here again. */
__release(local);
@@ -167,17 +138,16 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
D_ASSERT(e->block_id != ID_VACANT);
- spin_lock_irqsave(&mdev->req_lock, flags);
- mdev->writ_cnt += e->size >> 9;
- is_syncer_req = is_syncer_block_id(e->block_id);
-
/* after we moved e to done_ee,
* we may no longer access it,
* it may be freed/reused already!
* (as soon as we release the req_lock) */
e_sector = e->sector;
do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
+ is_syncer_req = is_syncer_block_id(e->block_id);
+ spin_lock_irqsave(&mdev->req_lock, flags);
+ mdev->writ_cnt += e->size >> 9;
list_del(&e->w.list); /* has been on active_ee or sync_ee */
list_add_tail(&e->w.list, &mdev->done_ee);
@@ -190,7 +160,7 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
? list_empty(&mdev->sync_ee)
: list_empty(&mdev->active_ee);
- if (error)
+ if (test_bit(__EE_WAS_ERROR, &e->flags))
__drbd_chk_io_error(mdev, FALSE);
spin_unlock_irqrestore(&mdev->req_lock, flags);
@@ -205,7 +175,42 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
wake_asender(mdev);
put_ldev(mdev);
+}
+
+/* writes on behalf of the partner, or resync writes,
+ * "submitted" by the receiver.
+ */
+void drbd_endio_sec(struct bio *bio, int error)
+{
+ struct drbd_epoch_entry *e = bio->bi_private;
+ struct drbd_conf *mdev = e->mdev;
+ int uptodate = bio_flagged(bio, BIO_UPTODATE);
+ int is_write = bio_data_dir(bio) == WRITE;
+
+ if (error)
+ dev_warn(DEV, "%s: error=%d s=%llus\n",
+ is_write ? "write" : "read", error,
+ (unsigned long long)e->sector);
+ if (!error && !uptodate) {
+ dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
+ is_write ? "write" : "read",
+ (unsigned long long)e->sector);
+ /* strange behavior of some lower level drivers...
+ * fail the request by clearing the uptodate flag,
+ * but do not return any error?! */
+ error = -EIO;
+ }
+
+ if (error)
+ set_bit(__EE_WAS_ERROR, &e->flags);
+ bio_put(bio); /* no need for the bio anymore */
+ if (atomic_dec_and_test(&e->pending_bios)) {
+ if (is_write)
+ drbd_endio_write_sec_final(e);
+ else
+ drbd_endio_read_sec_final(e);
+ }
}
/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
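
The merged drbd_endio_sec() above relies on atomic_dec_and_test(&e->pending_bios) so that the final read or write completion runs exactly once, however many bios back an epoch entry. A small user-space sketch of the same idiom, assuming C11 atomics and pthreads (all names are illustrative, not DRBD's):

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

#define NBIOS 4

static atomic_int pending = NBIOS;   /* plays the role of e->pending_bios */

static void final_completion(void)
{
	printf("all %d bios done, final completion runs once\n", NBIOS);
}

static void *bio_endio(void *arg)
{
	int id = (int)(long)arg;

	printf("bio %d completed\n", id);
	/* the completer that drops the counter to zero does the final work */
	if (atomic_fetch_sub(&pending, 1) == 1)
		final_completion();
	return NULL;
}

int main(void)
{
	pthread_t t[NBIOS];
	long i;

	for (i = 0; i < NBIOS; i++)
		pthread_create(&t[i], NULL, bio_endio, (void *)i);
	for (i = 0; i < NBIOS; i++)
		pthread_join(t[i], NULL);
	return 0;
}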
@@ -219,9 +224,6 @@ void drbd_endio_pri(struct bio *bio, int error)
enum drbd_req_event what;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
- if (error)
- dev_warn(DEV, "p %s: error=%d\n",
- bio_data_dir(bio) == WRITE ? "write" : "read", error);
if (!error && !uptodate) {
dev_warn(DEV, "p %s: setting error to -EIO\n",
bio_data_dir(bio) == WRITE ? "write" : "read");
@@ -252,20 +254,6 @@ void drbd_endio_pri(struct bio *bio, int error)
complete_master_bio(mdev, &m);
}
-int w_io_error(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
- struct drbd_request *req = container_of(w, struct drbd_request, w);
-
- /* NOTE: mdev->ldev can be NULL by the time we get here! */
- /* D_ASSERT(mdev->ldev->dc.on_io_error != EP_PASS_ON); */
-
- /* the only way this callback is scheduled is from _req_may_be_done,
- * when it is done and had a local write error, see comments there */
- drbd_req_free(req);
-
- return TRUE;
-}
-
int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
@@ -275,12 +263,9 @@ int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
* to give the disk the chance to relocate that block */
spin_lock_irq(&mdev->req_lock);
- if (cancel ||
- mdev->state.conn < C_CONNECTED ||
- mdev->state.pdsk <= D_INCONSISTENT) {
- _req_mod(req, send_canceled);
+ if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
+ _req_mod(req, read_retry_remote_canceled);
spin_unlock_irq(&mdev->req_lock);
- dev_alert(DEV, "WE ARE LOST. Local IO failure, no peer.\n");
return 1;
}
spin_unlock_irq(&mdev->req_lock);
@@ -295,7 +280,34 @@ int w_resync_inactive(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
return 1; /* Simply ignore this! */
}
-void drbd_csum(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
+void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
+{
+ struct hash_desc desc;
+ struct scatterlist sg;
+ struct page *page = e->pages;
+ struct page *tmp;
+ unsigned len;
+
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ sg_init_table(&sg, 1);
+ crypto_hash_init(&desc);
+
+ while ((tmp = page_chain_next(page))) {
+ /* all but the last page will be fully used */
+ sg_set_page(&sg, page, PAGE_SIZE, 0);
+ crypto_hash_update(&desc, &sg, sg.length);
+ page = tmp;
+ }
+ /* and now the last, possibly only partially used page */
+ len = e->size & (PAGE_SIZE - 1);
+ sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
+ crypto_hash_update(&desc, &sg, sg.length);
+ crypto_hash_final(&desc, digest);
+}
+
+void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
{
struct hash_desc desc;
struct scatterlist sg;
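
drbd_csum_ee() above walks the epoch entry's page chain and hashes every page in full except the last, which contributes e->size & (PAGE_SIZE - 1) bytes, or a whole page when the size is an exact multiple (the "len ?: PAGE_SIZE" expression). A stand-alone sketch of just that chunking arithmetic, assuming a 4 KiB page size:

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Mirrors the page-chain walk in drbd_csum_ee(): every page except the
 * last contributes PAGE_SIZE bytes; the last contributes the remainder,
 * or a full page when the size is an exact multiple of PAGE_SIZE. */
static void chunk_sizes(unsigned size)
{
	unsigned npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned last = size & (PAGE_SIZE - 1);
	unsigned i;

	printf("size %6u:", size);
	for (i = 0; i + 1 < npages; i++)
		printf(" %u", PAGE_SIZE);
	printf(" %u\n", last ? last : PAGE_SIZE);  /* the "len ?: PAGE_SIZE" case */
}

int main(void)
{
	chunk_sizes(4096);   /* one full page         */
	chunk_sizes(12288);  /* three full pages      */
	chunk_sizes(10240);  /* two full pages + 2048 */
	return 0;
}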
@@ -329,11 +341,11 @@ static int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel
return 1;
}
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
digest_size = crypto_hash_digestsize(mdev->csums_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
- drbd_csum(mdev, mdev->csums_tfm, e->private_bio, digest);
+ drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
inc_rs_pending(mdev);
ok = drbd_send_drequest_csum(mdev,
@@ -369,23 +381,21 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
/* GFP_TRY, because if there is no memory available right now, this may
* be rescheduled for later. It is "only" background resync, after all. */
e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
- if (!e) {
- put_ldev(mdev);
- return 2;
- }
+ if (!e)
+ goto fail;
spin_lock_irq(&mdev->req_lock);
list_add(&e->w.list, &mdev->read_ee);
spin_unlock_irq(&mdev->req_lock);
- e->private_bio->bi_end_io = drbd_endio_read_sec;
- e->private_bio->bi_rw = READ;
e->w.cb = w_e_send_csum;
+ if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
+ return 1;
- mdev->read_cnt += size >> 9;
- drbd_generic_make_request(mdev, DRBD_FAULT_RS_RD, e->private_bio);
-
- return 1;
+ drbd_free_ee(mdev, e);
+fail:
+ put_ldev(mdev);
+ return 2;
}
void resync_timer_fn(unsigned long data)
@@ -414,13 +424,25 @@ void resync_timer_fn(unsigned long data)
drbd_queue_work(&mdev->data.work, &mdev->resync_work);
}
+static int calc_resync_rate(struct drbd_conf *mdev)
+{
+ int d = mdev->data_delay / 1000; /* us -> ms */
+ int td = mdev->sync_conf.throttle_th * 100; /* 0.1s -> ms */
+ int hd = mdev->sync_conf.hold_off_th * 100; /* 0.1s -> ms */
+ int cr = mdev->sync_conf.rate;
+
+ return d <= td ? cr :
+ d >= hd ? 0 :
+ cr + (cr * (td - d) / (hd - td));
+}
+
int w_make_resync_request(struct drbd_conf *mdev,
struct drbd_work *w, int cancel)
{
unsigned long bit;
sector_t sector;
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
- int max_segment_size = queue_max_segment_size(mdev->rq_queue);
+ int max_segment_size;
int number, i, size, pe, mx;
int align, queued, sndbuf;
@@ -446,7 +468,13 @@ int w_make_resync_request(struct drbd_conf *mdev,
return 1;
}
- number = SLEEP_TIME * mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ);
+ /* starting with drbd 8.3.8, we can handle multi-bio EEs,
+ * if it should be necessary */
+ max_segment_size = mdev->agreed_pro_version < 94 ?
+ queue_max_segment_size(mdev->rq_queue) : DRBD_MAX_SEGMENT_SIZE;
+
+ mdev->c_sync_rate = calc_resync_rate(mdev);
+ number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
pe = atomic_read(&mdev->rs_pending_cnt);
mutex_lock(&mdev->data.mutex);
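
The new calc_resync_rate() is a straight linear interpolation: the full configured rate at or below the throttle threshold, zero at or above the hold-off threshold, and a linear fall-off in between. A stand-alone sketch with made-up thresholds (the unit conversions mirror the hunk above; the numbers are illustrative, not DRBD defaults):

#include <stdio.h>

/* same shape as calc_resync_rate(): d, td, hd in ms, cr is the configured rate */
static int calc_rate(int d, int td, int hd, int cr)
{
	return d <= td ? cr :
	       d >= hd ? 0 :
	       cr + (cr * (td - d) / (hd - td));   /* td < d < hd: linear fall-off */
}

int main(void)
{
	int td = 100;    /* throttle_th = 1  in 0.1 s units -> 100 ms   */
	int hd = 1000;   /* hold_off_th = 10 in 0.1 s units -> 1000 ms  */
	int cr = 10000;  /* configured resync rate                      */
	int d;

	for (d = 0; d <= 1200; d += 200)
		printf("data_delay %4d ms -> resync rate %5d\n",
		       d, calc_rate(d, td, hd, cr));
	return 0;
}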
@@ -509,12 +537,6 @@ next_sector:
*
* Additionally always align bigger requests, in order to
* be prepared for all stripe sizes of software RAIDs.
- *
- * we _do_ care about the agreed-upon q->max_segment_size
- * here, as splitting up the requests on the other side is more
- * difficult. the consequence is, that on lvm and md and other
- * "indirect" devices, this is dead code, since
- * q->max_segment_size will be PAGE_SIZE.
*/
align = 1;
for (;;) {
@@ -806,7 +828,7 @@ out:
/* helper */
static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
- if (drbd_bio_has_active_page(e->private_bio)) {
+ if (drbd_ee_has_active_page(e)) {
/* This might happen if sendpage() has not finished */
spin_lock_irq(&mdev->req_lock);
list_add_tail(&e->w.list, &mdev->net_ee);
@@ -832,7 +854,7 @@ int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
return 1;
}
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
ok = drbd_send_block(mdev, P_DATA_REPLY, e);
} else {
if (__ratelimit(&drbd_ratelimit_state))
@@ -873,7 +895,7 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
put_ldev(mdev);
}
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
inc_rs_pending(mdev);
ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
@@ -921,7 +943,7 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
di = (struct digest_info *)(unsigned long)e->block_id;
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
/* quick hack to try to avoid a race against reconfiguration.
* a real fix would be much more involved,
* introducing more locking mechanisms */
@@ -931,7 +953,7 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
digest = kmalloc(digest_size, GFP_NOIO);
}
if (digest) {
- drbd_csum(mdev, mdev->csums_tfm, e->private_bio, digest);
+ drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
eq = !memcmp(digest, di->digest, digest_size);
kfree(digest);
}
@@ -973,14 +995,14 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
if (unlikely(cancel))
goto out;
- if (unlikely(!drbd_bio_uptodate(e->private_bio)))
+ if (unlikely((e->flags & EE_WAS_ERROR) != 0))
goto out;
digest_size = crypto_hash_digestsize(mdev->verify_tfm);
/* FIXME if this allocation fails, online verify will not terminate! */
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
- drbd_csum(mdev, mdev->verify_tfm, e->private_bio, digest);
+ drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
inc_rs_pending(mdev);
ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
digest, digest_size, P_OV_REPLY);
@@ -1029,11 +1051,11 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
di = (struct digest_info *)(unsigned long)e->block_id;
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
digest_size = crypto_hash_digestsize(mdev->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
- drbd_csum(mdev, mdev->verify_tfm, e->private_bio, digest);
+ drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
D_ASSERT(digest_size == di->digest_size);
eq = !memcmp(digest, di->digest, digest_size);
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index f93fa111ce50..defdb5013ea3 100644
--- a/drivers/block/drbd/drbd_wrappers.h
+++ b/drivers/block/drbd/drbd_wrappers.h
@@ -18,23 +18,9 @@ static inline void drbd_set_my_capacity(struct drbd_conf *mdev,
#define drbd_bio_uptodate(bio) bio_flagged(bio, BIO_UPTODATE)
-static inline int drbd_bio_has_active_page(struct bio *bio)
-{
- struct bio_vec *bvec;
- int i;
-
- __bio_for_each_segment(bvec, bio, i, 0) {
- if (page_count(bvec->bv_page) > 1)
- return 1;
- }
-
- return 0;
-}
-
/* bi_end_io handlers */
extern void drbd_md_io_complete(struct bio *bio, int error);
-extern void drbd_endio_read_sec(struct bio *bio, int error);
-extern void drbd_endio_write_sec(struct bio *bio, int error);
+extern void drbd_endio_sec(struct bio *bio, int error);
extern void drbd_endio_pri(struct bio *bio, int error);
/*
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index a90e83c9be96..6120922f459f 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -485,7 +485,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
goto out;
}
- ret = vfs_fsync(file, file->f_path.dentry, 0);
+ ret = vfs_fsync(file, 0);
if (unlikely(ret)) {
ret = -EIO;
goto out;
@@ -495,7 +495,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
ret = lo_send(lo, bio, pos);
if (barrier && !ret) {
- ret = vfs_fsync(file, file->f_path.dentry, 0);
+ ret = vfs_fsync(file, 0);
if (unlikely(ret))
ret = -EIO;
}
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 59ca2b77b574..ed6fb91123ab 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1004,7 +1004,7 @@ static const struct block_device_operations floppy_fops = {
static int swim3_add_device(struct macio_dev *mdev, int index)
{
- struct device_node *swim = mdev->ofdev.node;
+ struct device_node *swim = mdev->ofdev.dev.of_node;
struct floppy_state *fs = &floppy_states[index];
int rc = -EBUSY;
@@ -1159,8 +1159,10 @@ static struct of_device_id swim3_match[] =
static struct macio_driver swim3_driver =
{
- .name = "swim3",
- .match_table = swim3_match,
+ .driver = {
+ .name = "swim3",
+ .of_match_table = swim3_match,
+ },
.probe = swim3_attach,
#if 0
.suspend = swim3_suspend,
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2138a7ae050c..258bc2ae2885 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -50,7 +50,7 @@ static void blk_done(struct virtqueue *vq)
unsigned long flags;
spin_lock_irqsave(&vblk->lock, flags);
- while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
+ while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
int error;
switch (vbr->status) {
@@ -70,6 +70,8 @@ static void blk_done(struct virtqueue *vq)
vbr->req->sense_len = vbr->in_hdr.sense_len;
vbr->req->errors = vbr->in_hdr.errors;
}
+ if (blk_special_request(vbr->req))
+ vbr->req->errors = (error != 0);
__blk_end_request_all(vbr->req, error);
list_del(&vbr->list);
@@ -103,6 +105,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
break;
+ case REQ_TYPE_SPECIAL:
+ vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+ vbr->out_hdr.sector = 0;
+ vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+ break;
case REQ_TYPE_LINUX_BLOCK:
if (req->cmd[0] == REQ_LB_OP_FLUSH) {
vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
@@ -151,7 +158,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
}
}
- if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
+ if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
mempool_free(vbr, vblk->pool);
return false;
}
@@ -180,7 +187,7 @@ static void do_virtblk_request(struct request_queue *q)
}
if (issued)
- vblk->vq->vq_ops->kick(vblk->vq);
+ virtqueue_kick(vblk->vq);
}
static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
@@ -189,12 +196,45 @@ static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
req->cmd[0] = REQ_LB_OP_FLUSH;
}
+/* return id (s/n) string for *disk to *id_str
+ */
+static int virtblk_get_id(struct gendisk *disk, char *id_str)
+{
+ struct virtio_blk *vblk = disk->private_data;
+ struct request *req;
+ struct bio *bio;
+
+ bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
+ GFP_KERNEL);
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
+ if (IS_ERR(req)) {
+ bio_put(bio);
+ return PTR_ERR(req);
+ }
+
+ req->cmd_type = REQ_TYPE_SPECIAL;
+ return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+}
+
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long data)
{
struct gendisk *disk = bdev->bd_disk;
struct virtio_blk *vblk = disk->private_data;
+ if (cmd == 0x56424944) { /* 'VBID' */
+ void __user *usr_data = (void __user *)data;
+ char id_str[VIRTIO_BLK_ID_BYTES];
+ int err;
+
+ err = virtblk_get_id(disk, id_str);
+ if (!err && copy_to_user(usr_data, id_str, VIRTIO_BLK_ID_BYTES))
+ err = -EFAULT;
+ return err;
+ }
/*
* Only allow the generic SCSI ioctls if the host can support it.
*/
@@ -258,7 +298,9 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
offsetof(struct virtio_blk_config, seg_max),
&sg_elems);
- if (err)
+
+ /* We need at least one SG element, whatever they say. */
+ if (err || !sg_elems)
sg_elems = 1;
/* We need extra sg elements at head and tail. */
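
For reference, the ad-hoc 'VBID' ioctl (0x56424944) added above can be exercised from user space roughly as sketched below; the 20-byte buffer length and the /dev/vda path are assumptions for illustration, not values taken from this patch:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define VBID_IOCTL  0x56424944   /* 'VBID', from the hunk above      */
#define VBID_LEN    20           /* assumed VIRTIO_BLK_ID_BYTES size */

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "/dev/vda";  /* illustrative path */
	char id[VBID_LEN + 1];
	int fd = open(dev, O_RDONLY);

	if (fd < 0) {
		perror(dev);
		return 1;
	}
	memset(id, 0, sizeof(id));
	if (ioctl(fd, VBID_IOCTL, id) < 0) {
		perror("VBID ioctl");
		close(fd);
		return 1;
	}
	printf("%s serial: %s\n", dev, id);
	close(fd);
	return 0;
}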
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index e1c95e208a66..a7b83c0a7eb5 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1198,10 +1198,10 @@ ace_of_probe(struct of_device *op, const struct of_device_id *match)
dev_dbg(&op->dev, "ace_of_probe(%p, %p)\n", op, match);
/* device id */
- id = of_get_property(op->node, "port-number", NULL);
+ id = of_get_property(op->dev.of_node, "port-number", NULL);
/* physaddr */
- rc = of_address_to_resource(op->node, 0, &res);
+ rc = of_address_to_resource(op->dev.of_node, 0, &res);
if (rc) {
dev_err(&op->dev, "invalid address\n");
return rc;
@@ -1209,11 +1209,11 @@ ace_of_probe(struct of_device *op, const struct of_device_id *match)
physaddr = res.start;
/* irq */
- irq = irq_of_parse_and_map(op->node, 0);
+ irq = irq_of_parse_and_map(op->dev.of_node, 0);
/* bus width */
bus_width = ACE_BUS_WIDTH_16;
- if (of_find_property(op->node, "8-bit", NULL))
+ if (of_find_property(op->dev.of_node, "8-bit", NULL))
bus_width = ACE_BUS_WIDTH_8;
/* Call the bus-independent setup code */
@@ -1237,13 +1237,12 @@ static const struct of_device_id ace_of_match[] __devinitconst = {
MODULE_DEVICE_TABLE(of, ace_of_match);
static struct of_platform_driver ace_of_driver = {
- .owner = THIS_MODULE,
- .name = "xsysace",
- .match_table = ace_of_match,
.probe = ace_of_probe,
.remove = __devexit_p(ace_of_remove),
.driver = {
.name = "xsysace",
+ .owner = THIS_MODULE,
+ .of_match_table = ace_of_match,
},
};
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index cc435be0bc13..451cd7071b1d 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -567,7 +567,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
struct disk_info *d;
struct cdrom_device_info *c;
struct request_queue *q;
- struct device_node *node = vdev->dev.archdata.of_node;
+ struct device_node *node = vdev->dev.of_node;
deviceno = vdev->unit_address;
if (deviceno >= VIOCD_MAX_CD)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index e21175be25d0..7cfcc629a7fd 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -1121,5 +1121,13 @@ config DEVPORT
source "drivers/s390/char/Kconfig"
+config RAMOOPS
+ tristate "Log panic/oops to a RAM buffer"
+ depends on HAS_IOMEM
+ default n
+ help
+ This enables panic and oops messages to be logged to a circular
+ buffer in RAM where it can be read back at some later point.
+
endmenu
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index d39be4cf1f5d..88d6eac69754 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -108,6 +108,7 @@ obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
obj-$(CONFIG_TCG_TPM) += tpm/
obj-$(CONFIG_PS3_FLASH) += ps3flash.o
+obj-$(CONFIG_RAMOOPS) += ramoops.o
obj-$(CONFIG_JS_RTC) += js-rtc.o
js-rtc-y = rtc.o
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 67ea3a60de74..70312da4c968 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -384,7 +384,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
{
u32 httfea,baseaddr,enuscr;
struct pci_dev *dev1;
- int i;
+ int i, ret;
unsigned size = amd64_fetch_size();
dev_info(&pdev->dev, "setting up ULi AGP\n");
@@ -400,15 +400,18 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
if (i == ARRAY_SIZE(uli_sizes)) {
dev_info(&pdev->dev, "no ULi size found for %d\n", size);
- return -ENODEV;
+ ret = -ENODEV;
+ goto put;
}
/* shadow x86-64 registers into ULi registers */
pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);
/* if x86-64 aperture base is beyond 4G, exit here */
- if ((httfea & 0x7fff) >> (32 - 25))
- return -ENODEV;
+ if ((httfea & 0x7fff) >> (32 - 25)) {
+ ret = -ENODEV;
+ goto put;
+ }
httfea = (httfea& 0x7fff) << 25;
@@ -420,9 +423,10 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
enuscr= httfea+ (size * 1024 * 1024) - 1;
pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
-
+ ret = 0;
+put:
pci_dev_put(dev1);
- return 0;
+ return ret;
}
@@ -441,7 +445,7 @@ static int nforce3_agp_init(struct pci_dev *pdev)
{
u32 tmp, apbase, apbar, aplimit;
struct pci_dev *dev1;
- int i;
+ int i, ret;
unsigned size = amd64_fetch_size();
dev_info(&pdev->dev, "setting up Nforce3 AGP\n");
@@ -458,7 +462,8 @@ static int nforce3_agp_init(struct pci_dev *pdev)
if (i == ARRAY_SIZE(nforce3_sizes)) {
dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
- return -ENODEV;
+ ret = -ENODEV;
+ goto put;
}
pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
@@ -472,7 +477,8 @@ static int nforce3_agp_init(struct pci_dev *pdev)
/* if x86-64 aperture base is beyond 4G, exit here */
if ( (apbase & 0x7fff) >> (32 - 25) ) {
dev_info(&pdev->dev, "aperture base > 4G\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto put;
}
apbase = (apbase & 0x7fff) << 25;
@@ -488,9 +494,11 @@ static int nforce3_agp_init(struct pci_dev *pdev)
pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
+ ret = 0;
+put:
pci_dev_put(dev1);
- return 0;
+ return ret;
}
static int __devinit agp_amd64_probe(struct pci_dev *pdev,
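
The uli_agp_init()/nforce3_agp_init() changes above convert early returns that leaked the pci_dev reference taken earlier in those functions into a single exit label that drops it on every path. A generic stand-alone sketch of that acquire-then-goto-cleanup shape (resource and names are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* stand-ins for the acquired resource */
static void *get_ref(void)      { return malloc(1); }
static void  put_ref(void *ref) { free(ref); printf("reference dropped\n"); }

static int setup(int fail_early, int fail_late)
{
	int ret;
	void *dev1 = get_ref();     /* like the pci_dev reference in the hunks */

	if (!dev1)
		return -1;          /* nothing acquired yet, plain return */

	if (fail_early) {           /* like "no ULi size found"           */
		ret = -1;
		goto put;
	}
	if (fail_late) {            /* like "aperture base > 4G"          */
		ret = -1;
		goto put;
	}
	ret = 0;                    /* success also flows through "put"   */
put:
	put_ref(dev1);              /* dropped exactly once on every path */
	return ret;
}

int main(void)
{
	printf("early failure: %d\n", setup(1, 0));
	printf("late failure:  %d\n", setup(0, 1));
	printf("success:       %d\n", setup(0, 0));
	return 0;
}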
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index e8ea6825822c..9344216183a4 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1059,7 +1059,7 @@ static void intel_i9xx_setup_flush(void)
}
}
-static int intel_i915_configure(void)
+static int intel_i9xx_configure(void)
{
struct aper_size_info_fixed *current_size;
u32 temp;
@@ -1207,6 +1207,38 @@ static int intel_i9xx_fetch_size(void)
return 0;
}
+static int intel_i915_get_gtt_size(void)
+{
+ int size;
+
+ if (IS_G33) {
+ u16 gmch_ctrl;
+
+ /* G33's GTT size defined in gmch_ctrl */
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+ switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
+ case G33_PGETBL_SIZE_1M:
+ size = 1024;
+ break;
+ case G33_PGETBL_SIZE_2M:
+ size = 2048;
+ break;
+ default:
+ dev_info(&agp_bridge->dev->dev,
+ "unknown page table size 0x%x, assuming 512KB\n",
+ (gmch_ctrl & G33_PGETBL_SIZE_MASK));
+ size = 512;
+ }
+ } else {
+ /* On previous hardware, the GTT size was just what was
+ * required to map the aperture.
+ */
+ size = agp_bridge->driver->fetch_size();
+ }
+
+ return KB(size);
+}
+
/* The intel i915 automatically initializes the agp aperture during POST.
* Use the memory already set aside for in the GTT.
*/
@@ -1216,7 +1248,7 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
struct aper_size_info_fixed *size;
int num_entries;
u32 temp, temp2;
- int gtt_map_size = 256 * 1024;
+ int gtt_map_size;
size = agp_bridge->current_size;
page_order = size->page_order;
@@ -1226,8 +1258,8 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
- if (IS_G33)
- gtt_map_size = 1024 * 1024; /* 1M on G33 */
+ gtt_map_size = intel_i915_get_gtt_size();
+
intel_private.gtt = ioremap(temp2, gtt_map_size);
if (!intel_private.gtt)
return -ENOMEM;
@@ -1422,7 +1454,7 @@ static const struct agp_bridge_driver intel_915_driver = {
.size_type = FIXED_APER_SIZE,
.num_aperture_sizes = 4,
.needs_scratch_page = true,
- .configure = intel_i915_configure,
+ .configure = intel_i9xx_configure,
.fetch_size = intel_i9xx_fetch_size,
.cleanup = intel_i915_cleanup,
.mask_memory = intel_i810_mask_memory,
@@ -1455,7 +1487,7 @@ static const struct agp_bridge_driver intel_i965_driver = {
.size_type = FIXED_APER_SIZE,
.num_aperture_sizes = 4,
.needs_scratch_page = true,
- .configure = intel_i915_configure,
+ .configure = intel_i9xx_configure,
.fetch_size = intel_i9xx_fetch_size,
.cleanup = intel_i915_cleanup,
.mask_memory = intel_i965_mask_memory,
@@ -1488,7 +1520,7 @@ static const struct agp_bridge_driver intel_g33_driver = {
.size_type = FIXED_APER_SIZE,
.num_aperture_sizes = 4,
.needs_scratch_page = true,
- .configure = intel_i915_configure,
+ .configure = intel_i9xx_configure,
.fetch_size = intel_i9xx_fetch_size,
.cleanup = intel_i915_cleanup,
.mask_memory = intel_i965_mask_memory,
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 95db71360d24..f845a8f718b3 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -415,7 +415,7 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
bridge->gatt_table_real = (u32 *) table;
/* Need to clear out any dirty data still sitting in caches */
flush_dcache_range((unsigned long)table,
- (unsigned long)(table_end + PAGE_SIZE));
+ (unsigned long)table_end + 1);
bridge->gatt_table = vmap(pages, (1 << page_order), 0, PAGE_KERNEL_NCG);
if (bridge->gatt_table == NULL)
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 56b27671adc4..4f8d60c25a98 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -84,6 +84,7 @@ static char *serial_version = "4.30";
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/bitops.h>
+#include <linux/platform_device.h>
#include <asm/setup.h>
@@ -1954,29 +1955,16 @@ static const struct tty_operations serial_ops = {
/*
* The serial driver boot-time initialization code!
*/
-static int __init rs_init(void)
+static int __init amiga_serial_probe(struct platform_device *pdev)
{
unsigned long flags;
struct serial_state * state;
int error;
- if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_SERIAL))
- return -ENODEV;
-
serial_driver = alloc_tty_driver(1);
if (!serial_driver)
return -ENOMEM;
- /*
- * We request SERDAT and SERPER only, because the serial registers are
- * too spreaded over the custom register space
- */
- if (!request_mem_region(CUSTOM_PHYSADDR+0x30, 4,
- "amiserial [Paula]")) {
- error = -EBUSY;
- goto fail_put_tty_driver;
- }
-
IRQ_ports = NULL;
show_serial_version();
@@ -1998,7 +1986,7 @@ static int __init rs_init(void)
error = tty_register_driver(serial_driver);
if (error)
- goto fail_release_mem_region;
+ goto fail_put_tty_driver;
state = rs_table;
state->magic = SSTATE_MAGIC;
@@ -2050,23 +2038,24 @@ static int __init rs_init(void)
ciab.ddra |= (SER_DTR | SER_RTS); /* outputs */
ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR); /* inputs */
+ platform_set_drvdata(pdev, state);
+
return 0;
fail_free_irq:
free_irq(IRQ_AMIGA_TBE, state);
fail_unregister:
tty_unregister_driver(serial_driver);
-fail_release_mem_region:
- release_mem_region(CUSTOM_PHYSADDR+0x30, 4);
fail_put_tty_driver:
put_tty_driver(serial_driver);
return error;
}
-static __exit void rs_exit(void)
+static int __exit amiga_serial_remove(struct platform_device *pdev)
{
int error;
- struct async_struct *info = rs_table[0].info;
+ struct serial_state *state = platform_get_drvdata(pdev);
+ struct async_struct *info = state->info;
/* printk("Unloading %s: version %s\n", serial_name, serial_version); */
tasklet_kill(&info->tlet);
@@ -2075,19 +2064,38 @@ static __exit void rs_exit(void)
error);
put_tty_driver(serial_driver);
- if (info) {
- rs_table[0].info = NULL;
- kfree(info);
- }
+ rs_table[0].info = NULL;
+ kfree(info);
free_irq(IRQ_AMIGA_TBE, rs_table);
free_irq(IRQ_AMIGA_RBF, rs_table);
- release_mem_region(CUSTOM_PHYSADDR+0x30, 4);
+ platform_set_drvdata(pdev, NULL);
+
+ return error;
+}
+
+static struct platform_driver amiga_serial_driver = {
+ .remove = __exit_p(amiga_serial_remove),
+ .driver = {
+ .name = "amiga-serial",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init amiga_serial_init(void)
+{
+ return platform_driver_probe(&amiga_serial_driver, amiga_serial_probe);
+}
+
+module_init(amiga_serial_init);
+
+static void __exit amiga_serial_exit(void)
+{
+ platform_driver_unregister(&amiga_serial_driver);
}
-module_init(rs_init)
-module_exit(rs_exit)
+module_exit(amiga_serial_exit);
#if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE)
@@ -2154,3 +2162,4 @@ console_initcall(amiserial_console_init);
#endif /* CONFIG_SERIAL_CONSOLE && !MODULE */
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:amiga-serial");
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 4f568cb9af3f..033e1505fca9 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -265,8 +265,8 @@ static unsigned int apm_poll(struct file *fp, poll_table * wait)
* Only when everyone who has opened /dev/apm_bios with write permission
* has acknowledge does the actual suspend happen.
*/
-static int
-apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
+static long
+apm_ioctl(struct file *filp, u_int cmd, u_long arg)
{
struct apm_user *as = filp->private_data;
int err = -EINVAL;
@@ -274,6 +274,7 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
if (!as->suser || !as->writer)
return -EPERM;
+ lock_kernel();
switch (cmd) {
case APM_IOC_SUSPEND:
mutex_lock(&state_lock);
@@ -334,6 +335,7 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
mutex_unlock(&state_lock);
break;
}
+ unlock_kernel();
return err;
}
@@ -397,7 +399,7 @@ static const struct file_operations apm_bios_fops = {
.owner = THIS_MODULE,
.read = apm_read,
.poll = apm_poll,
- .ioctl = apm_ioctl,
+ .unlocked_ioctl = apm_ioctl,
.open = apm_open,
.release = apm_release,
};
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index a7424bf7eacf..f4ae0e0fb631 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -26,6 +26,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
+#include <linux/smp_lock.h>
#include <linux/miscdevice.h>
#include <linux/pci.h>
#include <linux/wait.h>
@@ -106,8 +107,7 @@ static unsigned int DeviceErrorCount; /* number of device error */
static ssize_t ac_read (struct file *, char __user *, size_t, loff_t *);
static ssize_t ac_write (struct file *, const char __user *, size_t, loff_t *);
-static int ac_ioctl(struct inode *, struct file *, unsigned int,
- unsigned long);
+static long ac_ioctl(struct file *, unsigned int, unsigned long);
static irqreturn_t ac_interrupt(int, void *);
static const struct file_operations ac_fops = {
@@ -115,7 +115,7 @@ static const struct file_operations ac_fops = {
.llseek = no_llseek,
.read = ac_read,
.write = ac_write,
- .ioctl = ac_ioctl,
+ .unlocked_ioctl = ac_ioctl,
};
static struct miscdevice ac_miscdev = {
@@ -689,7 +689,7 @@ static irqreturn_t ac_interrupt(int vec, void *dev_instance)
-static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{ /* @ ADG or ATO depending on the case */
int i;
@@ -703,15 +703,11 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
/* In general, the device is only openable by root anyway, so we're not
particularly concerned that bogus ioctls can flood the console. */
- adgl = kmalloc(sizeof(struct st_ram_io), GFP_KERNEL);
- if (!adgl)
- return -ENOMEM;
+ adgl = memdup_user(argp, sizeof(struct st_ram_io));
+ if (IS_ERR(adgl))
+ return PTR_ERR(adgl);
- if (copy_from_user(adgl, argp, sizeof(struct st_ram_io))) {
- kfree(adgl);
- return -EFAULT;
- }
-
+ lock_kernel();
IndexCard = adgl->num_card-1;
if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
@@ -721,6 +717,7 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
warncount--;
}
kfree(adgl);
+ unlock_kernel();
return -EINVAL;
}
@@ -838,6 +835,7 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
Dummy = readb(apbs[IndexCard].RamIO + VERS);
kfree(adgl);
+ unlock_kernel();
return 0;
}
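
The applicom conversion above replaces kmalloc() plus copy_from_user() with memdup_user(), collapsing two failure paths into one check. A rough user-space analogue of that allocate-and-copy-in-one-step helper (note that the real memdup_user() returns an ERR_PTR rather than NULL plus an error code):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* allocate and copy in one step, so callers have a single failure check */
static void *memdup(const void *src, size_t len, int *err)
{
	void *p = malloc(len);

	if (!p) {
		*err = -ENOMEM;
		return NULL;
	}
	memcpy(p, src, len);
	*err = 0;
	return p;
}

struct st_ram_io_like { int num_card; char payload[32]; };  /* illustrative */

int main(void)
{
	struct st_ram_io_like in = { .num_card = 3, .payload = "hello" };
	struct st_ram_io_like *adgl;
	int err;

	adgl = memdup(&in, sizeof(in), &err);
	if (!adgl) {
		fprintf(stderr, "memdup failed: %d\n", err);
		return 1;
	}
	printf("card %d, payload %s\n", adgl->num_card, adgl->payload);
	free(adgl);
	return 0;
}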
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index 61f0146e215d..dbee8688f75c 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -232,7 +232,7 @@ ds1620_read(struct file *file, char __user *buf, size_t count, loff_t *ptr)
}
static int
-ds1620_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+ds1620_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct therm therm;
union {
@@ -316,6 +316,18 @@ ds1620_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
return 0;
}
+static long
+ds1620_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = ds1620_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
#ifdef THERM_USE_PROC
static int
proc_therm_ds1620_read(char *buf, char **start, off_t offset,
@@ -344,7 +356,7 @@ static const struct file_operations ds1620_fops = {
.owner = THIS_MODULE,
.open = ds1620_open,
.read = ds1620_read,
- .ioctl = ds1620_ioctl,
+ .unlocked_ioctl = ds1620_unlocked_ioctl,
};
static struct miscdevice ds1620_miscdev = {
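
Several conversions in this series follow the same shape as the ds1620 change above: the .ioctl entry point becomes .unlocked_ioctl, and a thin wrapper takes lock_kernel()/unlock_kernel() itself because the core no longer does it for the driver. A user-space sketch of that wrapper pattern, with a pthread mutex standing in for the BKL:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* legacy handler, written assuming the caller holds the big lock */
static int old_ioctl(unsigned int cmd, unsigned long arg)
{
	printf("cmd=%u arg=%lu handled under the lock\n", cmd, arg);
	return 0;
}

/* new entry point: takes the lock around the unchanged legacy handler */
static long unlocked_ioctl(unsigned int cmd, unsigned long arg)
{
	long ret;

	pthread_mutex_lock(&big_lock);     /* stands in for lock_kernel()   */
	ret = old_ioctl(cmd, arg);
	pthread_mutex_unlock(&big_lock);   /* stands in for unlock_kernel() */
	return ret;
}

int main(void)
{
	return (int)unlocked_ioctl(42, 7);
}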
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 045c930e6320..e3859d4eaead 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -93,8 +93,8 @@ static ssize_t dtlk_write(struct file *, const char __user *,
static unsigned int dtlk_poll(struct file *, poll_table *);
static int dtlk_open(struct inode *, struct file *);
static int dtlk_release(struct inode *, struct file *);
-static int dtlk_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
+static long dtlk_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
static const struct file_operations dtlk_fops =
{
@@ -102,7 +102,7 @@ static const struct file_operations dtlk_fops =
.read = dtlk_read,
.write = dtlk_write,
.poll = dtlk_poll,
- .ioctl = dtlk_ioctl,
+ .unlocked_ioctl = dtlk_ioctl,
.open = dtlk_open,
.release = dtlk_release,
};
@@ -263,10 +263,9 @@ static void dtlk_timer_tick(unsigned long data)
wake_up_interruptible(&dtlk_process_list);
}
-static int dtlk_ioctl(struct inode *inode,
- struct file *file,
- unsigned int cmd,
- unsigned long arg)
+static long dtlk_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
{
char __user *argp = (char __user *)arg;
struct dtlk_settings *sp;
@@ -276,7 +275,9 @@ static int dtlk_ioctl(struct inode *inode,
switch (cmd) {
case DTLK_INTERROGATE:
+ lock_kernel();
sp = dtlk_interrogate();
+ unlock_kernel();
if (copy_to_user(argp, sp, sizeof(struct dtlk_settings)))
return -EINVAL;
return 0;
diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c
index fda4181b5e67..82b5a88a82d7 100644
--- a/drivers/char/generic_nvram.c
+++ b/drivers/char/generic_nvram.c
@@ -19,6 +19,7 @@
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
#include <linux/init.h>
+#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
#ifdef CONFIG_PPC_PMAC
@@ -84,8 +85,7 @@ static ssize_t write_nvram(struct file *file, const char __user *buf,
return p - buf;
}
-static int nvram_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch(cmd) {
#ifdef CONFIG_PPC_PMAC
@@ -116,12 +116,23 @@ static int nvram_ioctl(struct inode *inode, struct file *file,
return 0;
}
+static long nvram_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = nvram_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
const struct file_operations nvram_fops = {
.owner = THIS_MODULE,
.llseek = nvram_llseek,
.read = read_nvram,
.write = write_nvram,
- .ioctl = nvram_ioctl,
+ .unlocked_ioctl = nvram_unlocked_ioctl,
};
static struct miscdevice nvram_dev = {
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 31e7c91c2d9d..b6c2cc167c11 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -262,7 +262,7 @@ static inline int gen_set_rtc_irq_bit(unsigned char bit)
#endif
}
-static int gen_rtc_ioctl(struct inode *inode, struct file *file,
+static int gen_rtc_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct rtc_time wtime;
@@ -332,6 +332,18 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
return -EINVAL;
}
+static long gen_rtc_unlocked_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = gen_rtc_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
/*
* We enforce only one user at a time here with the open/close.
* Also clear the previous interrupt data on an open, and clean
@@ -482,7 +494,7 @@ static const struct file_operations gen_rtc_fops = {
.read = gen_rtc_read,
.poll = gen_rtc_poll,
#endif
- .ioctl = gen_rtc_ioctl,
+ .unlocked_ioctl = gen_rtc_unlocked_ioctl,
.open = gen_rtc_open,
.release = gen_rtc_release,
};
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 712d9f271aa6..e0249722d25f 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -49,8 +49,9 @@
#include <asm/uaccess.h>
#include <linux/sysrq.h>
#include <linux/timer.h>
+#include <linux/time.h>
-#define VERSION_STR "0.9.0"
+#define VERSION_STR "0.9.1"
#define DEFAULT_IOFENCE_MARGIN 60 /* Default fudge factor, in seconds */
#define DEFAULT_IOFENCE_TICK 180 /* Default timer timeout, in seconds */
@@ -119,10 +120,8 @@ __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
#if defined(CONFIG_S390)
# define HAVE_MONOTONIC
# define TIMER_FREQ 1000000000ULL
-#elif defined(CONFIG_IA64)
-# define TIMER_FREQ ((unsigned long long)local_cpu_data->itc_freq)
#else
-# define TIMER_FREQ (HZ*loops_per_jiffy)
+# define TIMER_FREQ 1000000000ULL
#endif
#ifdef HAVE_MONOTONIC
@@ -130,7 +129,9 @@ extern unsigned long long monotonic_clock(void);
#else
static inline unsigned long long monotonic_clock(void)
{
- return get_cycles();
+ struct timespec ts;
+ getrawmonotonic(&ts);
+ return timespec_to_ns(&ts);
}
#endif /* HAVE_MONOTONIC */
@@ -168,6 +169,13 @@ static void hangcheck_fire(unsigned long data)
printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n");
}
}
+#if 0
+ /*
+ * Enable to investigate delays in detail
+ */
+ printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n",
+ tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
+#endif
mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
hangcheck_tsc = monotonic_clock();
}
@@ -180,7 +188,7 @@ static int __init hangcheck_init(void)
#if defined (HAVE_MONOTONIC)
printk("Hangcheck: Using monotonic_clock().\n");
#else
- printk("Hangcheck: Using get_cycles().\n");
+ printk("Hangcheck: Using getrawmonotonic().\n");
#endif /* HAVE_MONOTONIC */
hangcheck_tsc_margin =
(unsigned long long)(hangcheck_margin + hangcheck_tick);
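
With the hunk above, hangcheck-timer measures time in nanoseconds taken from getrawmonotonic(), so TIMER_FREQ is a fixed 10^9 and no longer depends on loops_per_jiffy or the CPU. A user-space analogue using CLOCK_MONOTONIC_RAW, purely for illustration:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* raw monotonic timestamp in nanoseconds, like the new monotonic_clock() */
static unsigned long long monotonic_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	unsigned long long t0 = monotonic_ns();

	sleep(1);
	printf("elapsed: %llu ns\n", monotonic_ns() - t0);
	return 0;
}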
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 9ded667625ac..a0a1829d3198 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -431,14 +431,18 @@ static int hpet_release(struct inode *inode, struct file *file)
static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
-static int
-hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
+static long hpet_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
struct hpet_dev *devp;
+ int ret;
devp = file->private_data;
- return hpet_ioctl_common(devp, cmd, arg, 0);
+ lock_kernel();
+ ret = hpet_ioctl_common(devp, cmd, arg, 0);
+ unlock_kernel();
+
+ return ret;
}
static int hpet_ioctl_ieon(struct hpet_dev *devp)
@@ -654,7 +658,7 @@ static const struct file_operations hpet_fops = {
.llseek = no_llseek,
.read = hpet_read,
.poll = hpet_poll,
- .ioctl = hpet_ioctl,
+ .unlocked_ioctl = hpet_ioctl,
.open = hpet_open,
.release = hpet_release,
.fasync = hpet_fasync,
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 793b236c9266..d4b14ff1c4c1 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -194,10 +194,8 @@ static inline void print_state(struct hvsi_struct *hp)
"HVSI_WAIT_FOR_MCTRL_RESPONSE",
"HVSI_FSP_DIED",
};
- const char *name = state_names[hp->state];
-
- if (hp->state > ARRAY_SIZE(state_names))
- name = "UNKNOWN";
+ const char *name = (hp->state < ARRAY_SIZE(state_names))
+ ? state_names[hp->state] : "UNKNOWN";
pr_debug("hvsi%i: state = %s\n", hp->index, name);
#endif /* DEBUG */
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 10f868eefaa6..0f9cbf1aaf15 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -660,7 +660,7 @@ static int __devinit n2rng_probe(struct of_device *op,
np->hvapi_major);
goto out_hvapi_unregister;
}
- np->num_units = of_getintprop_default(op->node,
+ np->num_units = of_getintprop_default(op->dev.of_node,
"rng-#units", 0);
if (!np->num_units) {
dev_err(&op->dev, "VF RNG lacks rng-#units property\n");
@@ -751,8 +751,11 @@ static const struct of_device_id n2rng_match[] = {
MODULE_DEVICE_TABLE(of, n2rng_match);
static struct of_platform_driver n2rng_driver = {
- .name = "n2rng",
- .match_table = n2rng_match,
+ .driver = {
+ .name = "n2rng",
+ .owner = THIS_MODULE,
+ .of_match_table = n2rng_match,
+ },
.probe = n2rng_probe,
.remove = __devexit_p(n2rng_remove),
};
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index a8b4c4010144..a348c7e9aa0b 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -15,6 +15,10 @@
#include <linux/amba/bus.h>
#include <linux/hw_random.h>
#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+static struct clk *rng_clk;
static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
@@ -40,6 +44,15 @@ static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id)
void __iomem *base;
int ret;
+ rng_clk = clk_get(&dev->dev, NULL);
+ if (IS_ERR(rng_clk)) {
+ dev_err(&dev->dev, "could not get rng clock\n");
+ ret = PTR_ERR(rng_clk);
+ return ret;
+ }
+
+ clk_enable(rng_clk);
+
ret = amba_request_regions(dev, dev->dev.init_name);
if (ret)
return ret;
@@ -57,6 +70,8 @@ out_unmap:
iounmap(base);
out_release:
amba_release_regions(dev);
+ clk_disable(rng_clk);
+ clk_put(rng_clk);
return ret;
}
@@ -66,6 +81,8 @@ static int nmk_rng_remove(struct amba_device *dev)
hwrng_unregister(&nmk_rng);
iounmap(base);
amba_release_regions(dev);
+ clk_disable(rng_clk);
+ clk_put(rng_clk);
return 0;
}
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 7fa61dd1d9d9..261ba8f22b8b 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -98,7 +98,7 @@ static int __devinit rng_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
void __iomem *rng_regs;
- struct device_node *rng_np = ofdev->node;
+ struct device_node *rng_np = ofdev->dev.of_node;
struct resource res;
int err = 0;
@@ -140,8 +140,11 @@ static struct of_device_id rng_match[] = {
};
static struct of_platform_driver rng_driver = {
- .name = "pasemi-rng",
- .match_table = rng_match,
+ .driver = {
+ .name = "pasemi-rng",
+ .owner = THIS_MODULE,
+ .of_match_table = rng_match,
+ },
.probe = rng_probe,
.remove = rng_remove,
};
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 64fe0a793efd..75f1cbd61c17 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -32,7 +32,7 @@ static bool busy;
static void random_recv_done(struct virtqueue *vq)
{
/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
- if (!vq->vq_ops->get_buf(vq, &data_avail))
+ if (!virtqueue_get_buf(vq, &data_avail))
return;
complete(&have_data);
@@ -46,10 +46,10 @@ static void register_buffer(u8 *buf, size_t size)
sg_init_one(&sg, buf, size);
/* There should always be room for one buffer. */
- if (vq->vq_ops->add_buf(vq, &sg, 0, 1, buf) < 0)
+ if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0)
BUG();
- vq->vq_ops->kick(vq);
+ virtqueue_kick(vq);
}
static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 65545de3dbf4..d8ec92a38980 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -228,8 +228,7 @@ static int handle_send_req(ipmi_user_t user,
return rv;
}
-static int ipmi_ioctl(struct inode *inode,
- struct file *file,
+static int ipmi_ioctl(struct file *file,
unsigned int cmd,
unsigned long data)
{
@@ -630,6 +629,23 @@ static int ipmi_ioctl(struct inode *inode,
return rv;
}
+/*
+ * Note: it doesn't make sense to take the BKL here but
+ * not in compat_ipmi_ioctl. -arnd
+ */
+static long ipmi_unlocked_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long data)
+{
+ int ret;
+
+ lock_kernel();
+ ret = ipmi_ioctl(file, cmd, data);
+ unlock_kernel();
+
+ return ret;
+}
+
#ifdef CONFIG_COMPAT
/*
@@ -802,7 +818,7 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
if (copy_to_user(precv64, &recv64, sizeof(recv64)))
return -EFAULT;
- rc = ipmi_ioctl(filep->f_path.dentry->d_inode, filep,
+ rc = ipmi_ioctl(filep,
((cmd == COMPAT_IPMICTL_RECEIVE_MSG)
? IPMICTL_RECEIVE_MSG
: IPMICTL_RECEIVE_MSG_TRUNC),
@@ -819,14 +835,14 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
return rc;
}
default:
- return ipmi_ioctl(filep->f_path.dentry->d_inode, filep, cmd, arg);
+ return ipmi_ioctl(filep, cmd, arg);
}
}
#endif
static const struct file_operations ipmi_fops = {
.owner = THIS_MODULE,
- .ioctl = ipmi_ioctl,
+ .unlocked_ioctl = ipmi_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_ipmi_ioctl,
#endif
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c6ad4234378d..4f3f8c9ec262 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2505,12 +2505,11 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
return rv;
}
- printk(KERN_INFO
- "ipmi: Found new BMC (man_id: 0x%6.6x, "
- " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
- bmc->id.manufacturer_id,
- bmc->id.product_id,
- bmc->id.device_id);
+ dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
+ "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+ bmc->id.manufacturer_id,
+ bmc->id.product_id,
+ bmc->id.device_id);
}
/*
@@ -4037,8 +4036,8 @@ static void ipmi_request_event(void)
static struct timer_list ipmi_timer;
-/* Call every ~100 ms. */
-#define IPMI_TIMEOUT_TIME 100
+/* Call every ~1000 ms. */
+#define IPMI_TIMEOUT_TIME 1000
/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 4462b113ba3f..35603dd4e6c5 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -107,6 +107,14 @@ enum si_type {
};
static char *si_to_str[] = { "kcs", "smic", "bt" };
+enum ipmi_addr_src {
+ SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
+ SI_PCI, SI_DEVICETREE, SI_DEFAULT
+};
+static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
+ "ACPI", "SMBIOS", "PCI",
+ "device-tree", "default" };
+
#define DEVICE_NAME "ipmi_si"
static struct platform_driver ipmi_driver = {
@@ -188,7 +196,7 @@ struct smi_info {
int (*irq_setup)(struct smi_info *info);
void (*irq_cleanup)(struct smi_info *info);
unsigned int io_size;
- char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
+ enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
void (*addr_source_cleanup)(struct smi_info *info);
void *addr_source_data;
@@ -300,6 +308,7 @@ static int num_max_busy_us;
static int unload_when_empty = 1;
+static int add_smi(struct smi_info *smi);
static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *to_clean);
@@ -314,9 +323,14 @@ static void deliver_recv_msg(struct smi_info *smi_info,
{
/* Deliver the message to the upper layer with the lock
released. */
- spin_unlock(&(smi_info->si_lock));
- ipmi_smi_msg_received(smi_info->intf, msg);
- spin_lock(&(smi_info->si_lock));
+
+ if (smi_info->run_to_completion) {
+ ipmi_smi_msg_received(smi_info->intf, msg);
+ } else {
+ spin_unlock(&(smi_info->si_lock));
+ ipmi_smi_msg_received(smi_info->intf, msg);
+ spin_lock(&(smi_info->si_lock));
+ }
}
static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -445,6 +459,9 @@ static inline void disable_si_irq(struct smi_info *smi_info)
if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
start_disable_irq(smi_info);
smi_info->interrupt_disabled = 1;
+ if (!atomic_read(&smi_info->stop_operation))
+ mod_timer(&smi_info->si_timer,
+ jiffies + SI_TIMEOUT_JIFFIES);
}
}
@@ -576,9 +593,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
if (msg[2] != 0) {
/* Error clearing flags */
- printk(KERN_WARNING
- "ipmi_si: Error clearing flags: %2.2x\n",
- msg[2]);
+ dev_warn(smi_info->dev,
+ "Error clearing flags: %2.2x\n", msg[2]);
}
if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
start_enable_irq(smi_info);
@@ -670,9 +686,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Could not enable interrupts"
- ", failed get, using polled mode.\n");
+ dev_warn(smi_info->dev, "Could not enable interrupts"
+ ", failed get, using polled mode.\n");
smi_info->si_state = SI_NORMAL;
} else {
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -693,11 +708,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
- if (msg[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Could not enable interrupts"
- ", failed set, using polled mode.\n");
- }
+ if (msg[2] != 0)
+ dev_warn(smi_info->dev, "Could not enable interrupts"
+ ", failed set, using polled mode.\n");
+ else
+ smi_info->interrupt_disabled = 0;
smi_info->si_state = SI_NORMAL;
break;
}
@@ -709,9 +724,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Could not disable interrupts"
- ", failed get.\n");
+ dev_warn(smi_info->dev, "Could not disable interrupts"
+ ", failed get.\n");
smi_info->si_state = SI_NORMAL;
} else {
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -733,9 +747,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Could not disable interrupts"
- ", failed set.\n");
+ dev_warn(smi_info->dev, "Could not disable interrupts"
+ ", failed set.\n");
}
smi_info->si_state = SI_NORMAL;
break;
@@ -877,6 +890,11 @@ static void sender(void *send_info,
printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
+ mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+
+ if (smi_info->thread)
+ wake_up_process(smi_info->thread);
+
if (smi_info->run_to_completion) {
/*
* If we are running to completion, then throw it in
@@ -997,6 +1015,8 @@ static int ipmi_thread(void *data)
; /* do nothing */
else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
schedule();
+ else if (smi_result == SI_SM_IDLE)
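+ /* Nothing pending: sleep a while (100 jiffies) rather than spin. */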
+ schedule_timeout_interruptible(100);
else
schedule_timeout_interruptible(0);
}
@@ -1039,6 +1059,7 @@ static void smi_timeout(unsigned long data)
unsigned long flags;
unsigned long jiffies_now;
long time_diff;
+ long timeout;
#ifdef DEBUG_TIMING
struct timeval t;
#endif
@@ -1059,9 +1080,9 @@ static void smi_timeout(unsigned long data)
if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
/* Running with interrupts, only do long timeouts. */
- smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
+ timeout = jiffies + SI_TIMEOUT_JIFFIES;
smi_inc_stat(smi_info, long_timeouts);
- goto do_add_timer;
+ goto do_mod_timer;
}
/*
@@ -1070,14 +1091,15 @@ static void smi_timeout(unsigned long data)
*/
if (smi_result == SI_SM_CALL_WITH_DELAY) {
smi_inc_stat(smi_info, short_timeouts);
- smi_info->si_timer.expires = jiffies + 1;
+ timeout = jiffies + 1;
} else {
smi_inc_stat(smi_info, long_timeouts);
- smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
+ timeout = jiffies + SI_TIMEOUT_JIFFIES;
}
- do_add_timer:
- add_timer(&(smi_info->si_timer));
+ do_mod_timer:
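+ /*
+ * Don't rearm the timer if the state machine went idle; sender()
+ * restarts it when the next request is queued.
+ */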
+ if (smi_result != SI_SM_IDLE)
+ mod_timer(&(smi_info->si_timer), timeout);
}
static irqreturn_t si_irq_handler(int irq, void *data)
@@ -1144,10 +1166,10 @@ static int smi_start_processing(void *send_info,
new_smi->thread = kthread_run(ipmi_thread, new_smi,
"kipmi%d", new_smi->intf_num);
if (IS_ERR(new_smi->thread)) {
- printk(KERN_NOTICE "ipmi_si_intf: Could not start"
- " kernel thread due to error %ld, only using"
- " timers to drive the interface\n",
- PTR_ERR(new_smi->thread));
+ dev_notice(new_smi->dev, "Could not start"
+ " kernel thread due to error %ld, only using"
+ " timers to drive the interface\n",
+ PTR_ERR(new_smi->thread));
new_smi->thread = NULL;
}
}
@@ -1308,14 +1330,13 @@ static int std_irq_setup(struct smi_info *info)
DEVICE_NAME,
info);
if (rv) {
- printk(KERN_WARNING
- "ipmi_si: %s unable to claim interrupt %d,"
- " running polled\n",
- DEVICE_NAME, info->irq);
+ dev_warn(info->dev, "%s unable to claim interrupt %d,"
+ " running polled\n",
+ DEVICE_NAME, info->irq);
info->irq = 0;
} else {
info->irq_cleanup = std_irq_cleanup;
- printk(" Using irq %d\n", info->irq);
+ dev_info(info->dev, "Using irq %d\n", info->irq);
}
return rv;
@@ -1406,8 +1427,8 @@ static int port_setup(struct smi_info *info)
info->io.outputb = port_outl;
break;
default:
- printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
- info->io.regsize);
+ dev_warn(info->dev, "Invalid register size: %d\n",
+ info->io.regsize);
return -EINVAL;
}
@@ -1529,8 +1550,8 @@ static int mem_setup(struct smi_info *info)
break;
#endif
default:
- printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
- info->io.regsize);
+ dev_warn(info->dev, "Invalid register size: %d\n",
+ info->io.regsize);
return -EINVAL;
}
@@ -1755,7 +1776,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
goto out;
}
- info->addr_source = "hotmod";
+ info->addr_source = SI_HOTMOD;
info->si_type = si_type;
info->io.addr_data = addr;
info->io.addr_type = addr_space;
@@ -1777,7 +1798,9 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
info->irq_setup = std_irq_setup;
info->slave_addr = ipmb;
- try_smi_init(info);
+ if (!add_smi(info))
+ if (try_smi_init(info))
+ cleanup_one_si(info);
} else {
/* remove */
struct smi_info *e, *tmp_e;
@@ -1813,7 +1836,8 @@ static __devinit void hardcode_find_bmc(void)
if (!info)
return;
- info->addr_source = "hardcoded";
+ info->addr_source = SI_HARDCODED;
+ printk(KERN_INFO PFX "probing via hardcoded address\n");
if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
info->si_type = SI_KCS;
@@ -1822,8 +1846,7 @@ static __devinit void hardcode_find_bmc(void)
} else if (strcmp(si_type[i], "bt") == 0) {
info->si_type = SI_BT;
} else {
- printk(KERN_WARNING
- "ipmi_si: Interface type specified "
+ printk(KERN_WARNING PFX "Interface type specified "
"for interface %d, was invalid: %s\n",
i, si_type[i]);
kfree(info);
@@ -1841,11 +1864,9 @@ static __devinit void hardcode_find_bmc(void)
info->io.addr_data = addrs[i];
info->io.addr_type = IPMI_MEM_ADDR_SPACE;
} else {
- printk(KERN_WARNING
- "ipmi_si: Interface type specified "
- "for interface %d, "
- "but port and address were not set or "
- "set to zero.\n", i);
+ printk(KERN_WARNING PFX "Interface type specified "
+ "for interface %d, but port and address were "
+ "not set or set to zero.\n", i);
kfree(info);
continue;
}
@@ -1863,7 +1884,9 @@ static __devinit void hardcode_find_bmc(void)
info->irq_setup = std_irq_setup;
info->slave_addr = slave_addrs[i];
- try_smi_init(info);
+ if (!add_smi(info))
+ if (try_smi_init(info))
+ cleanup_one_si(info);
}
}
@@ -1923,15 +1946,13 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
&ipmi_acpi_gpe,
info);
if (status != AE_OK) {
- printk(KERN_WARNING
- "ipmi_si: %s unable to claim ACPI GPE %d,"
- " running polled\n",
- DEVICE_NAME, info->irq);
+ dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
+ " running polled\n", DEVICE_NAME, info->irq);
info->irq = 0;
return -EINVAL;
} else {
info->irq_cleanup = acpi_gpe_irq_cleanup;
- printk(" Using ACPI GPE %d\n", info->irq);
+ dev_info(info->dev, "Using ACPI GPE %d\n", info->irq);
return 0;
}
}
@@ -1989,8 +2010,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
u8 addr_space;
if (spmi->IPMIlegacy != 1) {
- printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
- return -ENODEV;
+ printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
+ return -ENODEV;
}
if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
@@ -2000,11 +2021,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
- printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
+ printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
return -ENOMEM;
}
- info->addr_source = "SPMI";
+ info->addr_source = SI_SPMI;
+ printk(KERN_INFO PFX "probing via SPMI\n");
/* Figure out the interface type. */
switch (spmi->InterfaceType) {
@@ -2018,8 +2040,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
info->si_type = SI_BT;
break;
default:
- printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
- spmi->InterfaceType);
+ printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
+ spmi->InterfaceType);
kfree(info);
return -EIO;
}
@@ -2055,13 +2077,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
info->io.addr_type = IPMI_IO_ADDR_SPACE;
} else {
kfree(info);
- printk(KERN_WARNING
- "ipmi_si: Unknown ACPI I/O Address type\n");
+ printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
return -EIO;
}
info->io.addr_data = spmi->addr.address;
- try_smi_init(info);
+ add_smi(info);
return 0;
}
@@ -2093,6 +2114,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
{
struct acpi_device *acpi_dev;
struct smi_info *info;
+ struct resource *res;
acpi_handle handle;
acpi_status status;
unsigned long long tmp;
@@ -2105,7 +2127,8 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
if (!info)
return -ENOMEM;
- info->addr_source = "ACPI";
+ info->addr_source = SI_ACPI;
+ printk(KERN_INFO PFX "probing via ACPI\n");
handle = acpi_dev->handle;
@@ -2125,22 +2148,26 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
info->si_type = SI_BT;
break;
default:
- dev_info(&dev->dev, "unknown interface type %lld\n", tmp);
+ dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
goto err_free;
}
- if (pnp_port_valid(dev, 0)) {
+ res = pnp_get_resource(dev, IORESOURCE_IO, 0);
+ if (res) {
info->io_setup = port_setup;
info->io.addr_type = IPMI_IO_ADDR_SPACE;
- info->io.addr_data = pnp_port_start(dev, 0);
- } else if (pnp_mem_valid(dev, 0)) {
- info->io_setup = mem_setup;
- info->io.addr_type = IPMI_MEM_ADDR_SPACE;
- info->io.addr_data = pnp_mem_start(dev, 0);
} else {
+ res = pnp_get_resource(dev, IORESOURCE_MEM, 0);
+ if (res) {
+ info->io_setup = mem_setup;
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ }
+ }
+ if (!res) {
dev_err(&dev->dev, "no I/O or memory address\n");
goto err_free;
}
+ info->io.addr_data = res->start;
info->io.regspacing = DEFAULT_REGSPACING;
info->io.regsize = DEFAULT_REGSPACING;
@@ -2156,10 +2183,14 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
info->irq_setup = std_irq_setup;
}
- info->dev = &acpi_dev->dev;
+ info->dev = &dev->dev;
pnp_set_drvdata(dev, info);
- return try_smi_init(info);
+ dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
+ res, info->io.regsize, info->io.regspacing,
+ info->irq);
+
+ return add_smi(info);
err_free:
kfree(info);
@@ -2264,12 +2295,12 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
- printk(KERN_ERR
- "ipmi_si: Could not allocate SI data\n");
+ printk(KERN_ERR PFX "Could not allocate SI data\n");
return;
}
- info->addr_source = "SMBIOS";
+ info->addr_source = SI_SMBIOS;
+ printk(KERN_INFO PFX "probing via SMBIOS\n");
switch (ipmi_data->type) {
case 0x01: /* KCS */
@@ -2299,8 +2330,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
default:
kfree(info);
- printk(KERN_WARNING
- "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
+ printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
ipmi_data->addr_space);
return;
}
@@ -2318,7 +2348,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
if (info->irq)
info->irq_setup = std_irq_setup;
- try_smi_init(info);
+ add_smi(info);
}
static void __devinit dmi_find_bmc(void)
@@ -2368,7 +2398,8 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
if (!info)
return -ENOMEM;
- info->addr_source = "PCI";
+ info->addr_source = SI_PCI;
+ dev_info(&pdev->dev, "probing via PCI\n");
switch (class_type) {
case PCI_ERMC_CLASSCODE_TYPE_SMIC:
@@ -2385,15 +2416,13 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
default:
kfree(info);
- printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
- pci_name(pdev), class_type);
+ dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
return -ENOMEM;
}
rv = pci_enable_device(pdev);
if (rv) {
- printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "couldn't enable PCI device\n");
kfree(info);
return rv;
}
@@ -2421,7 +2450,11 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
info->dev = &pdev->dev;
pci_set_drvdata(pdev, info);
- return try_smi_init(info);
+ dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
+ &pdev->resource[0], info->io.regsize, info->io.regspacing,
+ info->irq);
+
+ return add_smi(info);
}
static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
@@ -2469,11 +2502,11 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
struct smi_info *info;
struct resource resource;
const int *regsize, *regspacing, *regshift;
- struct device_node *np = dev->node;
+ struct device_node *np = dev->dev.of_node;
int ret;
int proplen;
- dev_info(&dev->dev, PFX "probing via device tree\n");
+ dev_info(&dev->dev, "probing via device tree\n");
ret = of_address_to_resource(np, 0, &resource);
if (ret) {
@@ -2503,12 +2536,12 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
if (!info) {
dev_err(&dev->dev,
- PFX "could not allocate memory for OF probe\n");
+ "could not allocate memory for OF probe\n");
return -ENOMEM;
}
info->si_type = (enum si_type) match->data;
- info->addr_source = "device-tree";
+ info->addr_source = SI_DEVICETREE;
info->irq_setup = std_irq_setup;
if (resource.flags & IORESOURCE_IO) {
@@ -2525,16 +2558,16 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
info->io.regspacing = regspacing ? *regspacing : DEFAULT_REGSPACING;
info->io.regshift = regshift ? *regshift : 0;
- info->irq = irq_of_parse_and_map(dev->node, 0);
+ info->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
info->dev = &dev->dev;
- dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n",
+ dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
info->io.addr_data, info->io.regsize, info->io.regspacing,
info->irq);
dev_set_drvdata(&dev->dev, info);
- return try_smi_init(info);
+ return add_smi(info);
}
static int __devexit ipmi_of_remove(struct of_device *dev)
@@ -2555,8 +2588,11 @@ static struct of_device_id ipmi_match[] =
};
static struct of_platform_driver ipmi_of_platform_driver = {
- .name = "ipmi",
- .match_table = ipmi_match,
+ .driver = {
+ .name = "ipmi",
+ .owner = THIS_MODULE,
+ .of_match_table = ipmi_match,
+ },
.probe = ipmi_of_probe,
.remove = __devexit_p(ipmi_of_remove),
};
@@ -2640,9 +2676,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
rv = wait_for_msg_done(smi_info);
if (rv) {
- printk(KERN_WARNING
- "ipmi_si: Error getting response from get global,"
- " enables command, the event buffer is not"
+ printk(KERN_WARNING PFX "Error getting response from get"
+ " global enables command, the event buffer is not"
" enabled.\n");
goto out;
}
@@ -2654,10 +2689,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
resp[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Invalid return from get global"
- " enables command, cannot enable the event"
- " buffer.\n");
+ printk(KERN_WARNING PFX "Invalid return from get global"
+ " enables command, cannot enable the event buffer.\n");
rv = -EINVAL;
goto out;
}
@@ -2673,9 +2706,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
rv = wait_for_msg_done(smi_info);
if (rv) {
- printk(KERN_WARNING
- "ipmi_si: Error getting response from set global,"
- " enables command, the event buffer is not"
+ printk(KERN_WARNING PFX "Error getting response from set"
+ " global, enables command, the event buffer is not"
" enabled.\n");
goto out;
}
@@ -2686,10 +2718,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
if (resp_len < 3 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
- printk(KERN_WARNING
- "ipmi_si: Invalid return from get global,"
- "enables command, not enable the event"
- " buffer.\n");
+ printk(KERN_WARNING PFX "Invalid return from get global,"
+ "enables command, not enable the event buffer.\n");
rv = -EINVAL;
goto out;
}
@@ -2948,7 +2978,7 @@ static __devinit void default_find_bmc(void)
if (!info)
return;
- info->addr_source = NULL;
+ info->addr_source = SI_DEFAULT;
info->si_type = ipmi_defaults[i].type;
info->io_setup = port_setup;
@@ -2960,14 +2990,16 @@ static __devinit void default_find_bmc(void)
info->io.regsize = DEFAULT_REGSPACING;
info->io.regshift = 0;
- if (try_smi_init(info) == 0) {
- /* Found one... */
- printk(KERN_INFO "ipmi_si: Found default %s state"
- " machine at %s address 0x%lx\n",
- si_to_str[info->si_type],
- addr_space_to_str[info->io.addr_type],
- info->io.addr_data);
- return;
+ if (add_smi(info) == 0) {
+ if ((try_smi_init(info)) == 0) {
+ /* Found one... */
+ printk(KERN_INFO PFX "Found default %s"
+ " state machine at %s address 0x%lx\n",
+ si_to_str[info->si_type],
+ addr_space_to_str[info->io.addr_type],
+ info->io.addr_data);
+ } else
+ cleanup_one_si(info);
}
}
}
@@ -2986,34 +3018,48 @@ static int is_new_interface(struct smi_info *info)
return 1;
}
-static int try_smi_init(struct smi_info *new_smi)
+static int add_smi(struct smi_info *new_smi)
{
- int rv;
- int i;
-
- if (new_smi->addr_source) {
- printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
- " machine at %s address 0x%lx, slave address 0x%x,"
- " irq %d\n",
- new_smi->addr_source,
- si_to_str[new_smi->si_type],
- addr_space_to_str[new_smi->io.addr_type],
- new_smi->io.addr_data,
- new_smi->slave_addr, new_smi->irq);
- }
+ int rv = 0;
+ printk(KERN_INFO PFX "Adding %s-specified %s state machine",
+ ipmi_addr_src_to_str[new_smi->addr_source],
+ si_to_str[new_smi->si_type]);
mutex_lock(&smi_infos_lock);
if (!is_new_interface(new_smi)) {
- printk(KERN_WARNING "ipmi_si: duplicate interface\n");
+ printk(KERN_CONT PFX "duplicate interface\n");
rv = -EBUSY;
goto out_err;
}
+ printk(KERN_CONT "\n");
+
/* So we know not to free it unless we have allocated one. */
new_smi->intf = NULL;
new_smi->si_sm = NULL;
new_smi->handlers = NULL;
+ list_add_tail(&new_smi->link, &smi_infos);
+
+out_err:
+ mutex_unlock(&smi_infos_lock);
+ return rv;
+}
+
+static int try_smi_init(struct smi_info *new_smi)
+{
+ int rv = 0;
+ int i;
+
+ printk(KERN_INFO PFX "Trying %s-specified %s state"
+ " machine at %s address 0x%lx, slave address 0x%x,"
+ " irq %d\n",
+ ipmi_addr_src_to_str[new_smi->addr_source],
+ si_to_str[new_smi->si_type],
+ addr_space_to_str[new_smi->io.addr_type],
+ new_smi->io.addr_data,
+ new_smi->slave_addr, new_smi->irq);
+
switch (new_smi->si_type) {
case SI_KCS:
new_smi->handlers = &kcs_smi_handlers;
@@ -3036,7 +3082,8 @@ static int try_smi_init(struct smi_info *new_smi)
/* Allocate the state machine's data and initialize it. */
new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
if (!new_smi->si_sm) {
- printk(KERN_ERR "Could not allocate state machine memory\n");
+ printk(KERN_ERR PFX
+ "Could not allocate state machine memory\n");
rv = -ENOMEM;
goto out_err;
}
@@ -3046,7 +3093,7 @@ static int try_smi_init(struct smi_info *new_smi)
/* Now that we know the I/O size, we can set up the I/O. */
rv = new_smi->io_setup(new_smi);
if (rv) {
- printk(KERN_ERR "Could not set up I/O space\n");
+ printk(KERN_ERR PFX "Could not set up I/O space\n");
goto out_err;
}
@@ -3056,8 +3103,7 @@ static int try_smi_init(struct smi_info *new_smi)
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
if (new_smi->addr_source)
- printk(KERN_INFO "ipmi_si: Interface detection"
- " failed\n");
+ printk(KERN_INFO PFX "Interface detection failed\n");
rv = -ENODEV;
goto out_err;
}
@@ -3069,7 +3115,7 @@ static int try_smi_init(struct smi_info *new_smi)
rv = try_get_dev_id(new_smi);
if (rv) {
if (new_smi->addr_source)
- printk(KERN_INFO "ipmi_si: There appears to be no BMC"
+ printk(KERN_INFO PFX "There appears to be no BMC"
" at this location\n");
goto out_err;
}
@@ -3085,7 +3131,7 @@ static int try_smi_init(struct smi_info *new_smi)
for (i = 0; i < SI_NUM_STATS; i++)
atomic_set(&new_smi->stats[i], 0);
- new_smi->interrupt_disabled = 0;
+ new_smi->interrupt_disabled = 1;
atomic_set(&new_smi->stop_operation, 0);
new_smi->intf_num = smi_num;
smi_num++;
@@ -3111,9 +3157,8 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi->pdev = platform_device_alloc("ipmi_si",
new_smi->intf_num);
if (!new_smi->pdev) {
- printk(KERN_ERR
- "ipmi_si_intf:"
- " Unable to allocate platform device\n");
+ printk(KERN_ERR PFX
+ "Unable to allocate platform device\n");
goto out_err;
}
new_smi->dev = &new_smi->pdev->dev;
@@ -3121,9 +3166,8 @@ static int try_smi_init(struct smi_info *new_smi)
rv = platform_device_add(new_smi->pdev);
if (rv) {
- printk(KERN_ERR
- "ipmi_si_intf:"
- " Unable to register system interface device:"
+ printk(KERN_ERR PFX
+ "Unable to register system interface device:"
" %d\n",
rv);
goto out_err;
@@ -3138,9 +3182,8 @@ static int try_smi_init(struct smi_info *new_smi)
"bmc",
new_smi->slave_addr);
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to register device: error %d\n",
- rv);
+ dev_err(new_smi->dev, "Unable to register device: error %d\n",
+ rv);
goto out_err_stop_timer;
}
@@ -3148,9 +3191,7 @@ static int try_smi_init(struct smi_info *new_smi)
type_file_read_proc,
new_smi);
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to create proc entry: %d\n",
- rv);
+ dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
@@ -3158,9 +3199,7 @@ static int try_smi_init(struct smi_info *new_smi)
stat_file_read_proc,
new_smi);
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to create proc entry: %d\n",
- rv);
+ dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
@@ -3168,18 +3207,12 @@ static int try_smi_init(struct smi_info *new_smi)
param_read_proc,
new_smi);
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to create proc entry: %d\n",
- rv);
+ dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
- list_add_tail(&new_smi->link, &smi_infos);
-
- mutex_unlock(&smi_infos_lock);
-
- printk(KERN_INFO "IPMI %s interface initialized\n",
- si_to_str[new_smi->si_type]);
+ dev_info(new_smi->dev, "IPMI %s interface initialized\n",
+ si_to_str[new_smi->si_type]);
return 0;
@@ -3188,11 +3221,17 @@ static int try_smi_init(struct smi_info *new_smi)
wait_for_timer_and_thread(new_smi);
out_err:
- if (new_smi->intf)
+ new_smi->interrupt_disabled = 1;
+
+ if (new_smi->intf) {
ipmi_unregister_smi(new_smi->intf);
+ new_smi->intf = NULL;
+ }
- if (new_smi->irq_cleanup)
+ if (new_smi->irq_cleanup) {
new_smi->irq_cleanup(new_smi);
+ new_smi->irq_cleanup = NULL;
+ }
/*
* Wait until we know that we are out of any interrupt
@@ -3205,18 +3244,21 @@ static int try_smi_init(struct smi_info *new_smi)
if (new_smi->handlers)
new_smi->handlers->cleanup(new_smi->si_sm);
kfree(new_smi->si_sm);
+ new_smi->si_sm = NULL;
}
- if (new_smi->addr_source_cleanup)
+ if (new_smi->addr_source_cleanup) {
new_smi->addr_source_cleanup(new_smi);
- if (new_smi->io_cleanup)
+ new_smi->addr_source_cleanup = NULL;
+ }
+ if (new_smi->io_cleanup) {
new_smi->io_cleanup(new_smi);
+ new_smi->io_cleanup = NULL;
+ }
- if (new_smi->dev_registered)
+ if (new_smi->dev_registered) {
platform_device_unregister(new_smi->pdev);
-
- kfree(new_smi);
-
- mutex_unlock(&smi_infos_lock);
+ new_smi->dev_registered = 0;
+ }
return rv;
}
@@ -3226,6 +3268,8 @@ static __devinit int init_ipmi_si(void)
int i;
char *str;
int rv;
+ struct smi_info *e;
+ enum ipmi_addr_src type = SI_INVALID;
if (initialized)
return 0;
@@ -3234,9 +3278,7 @@ static __devinit int init_ipmi_si(void)
/* Register the device drivers. */
rv = driver_register(&ipmi_driver.driver);
if (rv) {
- printk(KERN_ERR
- "init_ipmi_si: Unable to register driver: %d\n",
- rv);
+ printk(KERN_ERR PFX "Unable to register driver: %d\n", rv);
return rv;
}
@@ -3260,38 +3302,81 @@ static __devinit int init_ipmi_si(void)
hardcode_find_bmc();
-#ifdef CONFIG_DMI
- dmi_find_bmc();
-#endif
+ /* If the user gave us a device, they presumably want us to use it */
+ mutex_lock(&smi_infos_lock);
+ if (!list_empty(&smi_infos)) {
+ mutex_unlock(&smi_infos_lock);
+ return 0;
+ }
+ mutex_unlock(&smi_infos_lock);
-#ifdef CONFIG_ACPI
- spmi_find_bmc();
+#ifdef CONFIG_PCI
+ rv = pci_register_driver(&ipmi_pci_driver);
+ if (rv)
+ printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv);
#endif
+
#ifdef CONFIG_ACPI
pnp_register_driver(&ipmi_pnp_driver);
#endif
-#ifdef CONFIG_PCI
- rv = pci_register_driver(&ipmi_pci_driver);
- if (rv)
- printk(KERN_ERR
- "init_ipmi_si: Unable to register PCI driver: %d\n",
- rv);
+#ifdef CONFIG_DMI
+ dmi_find_bmc();
+#endif
+
+#ifdef CONFIG_ACPI
+ spmi_find_bmc();
#endif
#ifdef CONFIG_PPC_OF
of_register_platform_driver(&ipmi_of_platform_driver);
#endif
+ /* We prefer devices with interrupts, but in the case of a machine
+ with multiple BMCs we assume that there will be several instances
+ of a given type, so if we succeed in registering one type, also
+ try to register everything else of the same type. */
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry(e, &smi_infos, link) {
+ /* Try to register a device if it has an IRQ and we either
+ haven't successfully registered a device yet or this
+ device has the same type as one we successfully registered */
+ if (e->irq && (!type || e->addr_source == type)) {
+ if (!try_smi_init(e)) {
+ type = e->addr_source;
+ }
+ }
+ }
+
+ /* type will only have been set if we successfully registered an si */
+ if (type) {
+ mutex_unlock(&smi_infos_lock);
+ return 0;
+ }
+
+ /* Fall back to devices without interrupts */
+
+ list_for_each_entry(e, &smi_infos, link) {
+ if (!e->irq && (!type || e->addr_source == type)) {
+ if (!try_smi_init(e)) {
+ type = e->addr_source;
+ }
+ }
+ }
+ mutex_unlock(&smi_infos_lock);
+
+ if (type)
+ return 0;
+
if (si_trydefaults) {
mutex_lock(&smi_infos_lock);
if (list_empty(&smi_infos)) {
/* No BMC was found, try defaults. */
mutex_unlock(&smi_infos_lock);
default_find_bmc();
- } else {
+ } else
mutex_unlock(&smi_infos_lock);
- }
}
mutex_lock(&smi_infos_lock);
@@ -3305,8 +3390,8 @@ static __devinit int init_ipmi_si(void)
of_unregister_platform_driver(&ipmi_of_platform_driver);
#endif
driver_unregister(&ipmi_driver.driver);
- printk(KERN_WARNING
- "ipmi_si: Unable to find any System Interface(s)\n");
+ printk(KERN_WARNING PFX
+ "Unable to find any System Interface(s)\n");
return -ENODEV;
} else {
mutex_unlock(&smi_infos_lock);
@@ -3317,7 +3402,7 @@ module_init(init_ipmi_si);
static void cleanup_one_si(struct smi_info *to_clean)
{
- int rv;
+ int rv = 0;
unsigned long flags;
if (!to_clean)
@@ -3361,14 +3446,16 @@ static void cleanup_one_si(struct smi_info *to_clean)
schedule_timeout_uninterruptible(1);
}
- rv = ipmi_unregister_smi(to_clean->intf);
+ if (to_clean->intf)
+ rv = ipmi_unregister_smi(to_clean->intf);
+
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to unregister device: errno=%d\n",
+ printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
rv);
}
- to_clean->handlers->cleanup(to_clean->si_sm);
+ if (to_clean->handlers)
+ to_clean->handlers->cleanup(to_clean->si_sm);
kfree(to_clean->si_sm);
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index a4d57e31f713..82bcdb262a3a 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -659,7 +659,7 @@ static struct watchdog_info ident = {
.identity = "IPMI"
};
-static int ipmi_ioctl(struct inode *inode, struct file *file,
+static int ipmi_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
@@ -730,6 +730,19 @@ static int ipmi_ioctl(struct inode *inode, struct file *file,
}
}
+static long ipmi_unlocked_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = ipmi_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static ssize_t ipmi_write(struct file *file,
const char __user *buf,
size_t len,
@@ -880,7 +893,7 @@ static const struct file_operations ipmi_wdog_fops = {
.read = ipmi_read,
.poll = ipmi_poll,
.write = ipmi_write,
- .ioctl = ipmi_ioctl,
+ .unlocked_ioctl = ipmi_unlocked_ioctl,
.open = ipmi_open,
.release = ipmi_close,
.fasync = ipmi_fasync,
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 92ab03d28294..cd650ca8c679 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -144,6 +144,7 @@ static int misc_open(struct inode * inode, struct file * file)
old_fops = file->f_op;
file->f_op = new_fops;
if (file->f_op->open) {
+ file->private_data = c;
err=file->f_op->open(inode,file);
if (err) {
fops_put(file->f_op);
diff --git a/drivers/char/n_gsm.c b/drivers/char/n_gsm.c
index c4161d5e053d..e4089c432f15 100644
--- a/drivers/char/n_gsm.c
+++ b/drivers/char/n_gsm.c
@@ -904,9 +904,7 @@ static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
int len;
/* Priority ordering: We should do priority with RR of the groups */
int i = 1;
- unsigned long flags;
- spin_lock_irqsave(&gsm->tx_lock, flags);
while (i < NUM_DLCI) {
struct gsm_dlci *dlci;
@@ -927,7 +925,6 @@ static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
if (len == 0)
i++;
}
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
}
/**
@@ -2230,12 +2227,16 @@ static int gsmld_open(struct tty_struct *tty)
static void gsmld_write_wakeup(struct tty_struct *tty)
{
struct gsm_mux *gsm = tty->disc_data;
+ unsigned long flags;
/* Queue poll */
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
gsm_data_kick(gsm);
- if (gsm->tx_bytes < TX_THRESH_LO)
+ if (gsm->tx_bytes < TX_THRESH_LO) {
+ spin_lock_irqsave(&gsm->tx_lock, flags);
gsm_dlci_data_sweep(gsm);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ }
}
/**
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 47e8f7b0e4c1..66d2917b003f 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -296,8 +296,8 @@ checksum_err:
return -EIO;
}
-static int nvram_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long nvram_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
int i;
@@ -308,6 +308,7 @@ static int nvram_ioctl(struct inode *inode, struct file *file,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ lock_kernel();
spin_lock_irq(&rtc_lock);
for (i = 0; i < NVRAM_BYTES; ++i)
@@ -315,6 +316,7 @@ static int nvram_ioctl(struct inode *inode, struct file *file,
__nvram_set_checksum();
spin_unlock_irq(&rtc_lock);
+ unlock_kernel();
return 0;
case NVRAM_SETCKS:
@@ -323,9 +325,11 @@ static int nvram_ioctl(struct inode *inode, struct file *file,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ lock_kernel();
spin_lock_irq(&rtc_lock);
__nvram_set_checksum();
spin_unlock_irq(&rtc_lock);
+ unlock_kernel();
return 0;
default:
@@ -422,7 +426,7 @@ static const struct file_operations nvram_fops = {
.llseek = nvram_llseek,
.read = nvram_read,
.write = nvram_write,
- .ioctl = nvram_ioctl,
+ .unlocked_ioctl = nvram_ioctl,
.open = nvram_open,
.release = nvram_release,
};
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index f80810901db6..043a1c7b86be 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -94,8 +94,9 @@ static int get_flash_id(void)
return c2;
}
-static int flash_ioctl(struct inode *inodep, struct file *filep, unsigned int cmd, unsigned long arg)
+static long flash_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
+ lock_kernel();
switch (cmd) {
case CMD_WRITE_DISABLE:
gbWriteBase64Enable = 0;
@@ -113,8 +114,10 @@ static int flash_ioctl(struct inode *inodep, struct file *filep, unsigned int cm
default:
gbWriteBase64Enable = 0;
gbWriteEnable = 0;
+ unlock_kernel();
return -EINVAL;
}
+ unlock_kernel();
return 0;
}
@@ -631,7 +634,7 @@ static const struct file_operations flash_fops =
.llseek = flash_llseek,
.read = flash_read,
.write = flash_write,
- .ioctl = flash_ioctl,
+ .unlocked_ioctl = flash_ioctl,
};
static struct miscdevice flash_miscdev =
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index fdd37543aa79..02abfddce45a 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -287,12 +287,10 @@ static int register_device (int minor, struct pp_struct *pp)
char *name;
int fl;
- name = kmalloc (strlen (CHRDEV) + 3, GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
if (name == NULL)
return -ENOMEM;
- sprintf (name, CHRDEV "%x", minor);
-
port = parport_find_number (minor);
if (!port) {
printk (KERN_WARNING "%s: no associated port!\n", name);
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
index 606048b72bcf..85c004a518ee 100644
--- a/drivers/char/ps3flash.c
+++ b/drivers/char/ps3flash.c
@@ -305,8 +305,7 @@ static int ps3flash_flush(struct file *file, fl_owner_t id)
return ps3flash_writeback(ps3flash_dev);
}
-static int ps3flash_fsync(struct file *file, struct dentry *dentry,
- int datasync)
+static int ps3flash_fsync(struct file *file, int datasync)
{
return ps3flash_writeback(ps3flash_dev);
}
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
new file mode 100644
index 000000000000..74f00b5ffa36
--- /dev/null
+++ b/drivers/char/ramoops.c
@@ -0,0 +1,162 @@
+/*
+ * RAM Oops/Panic logger
+ *
+ * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kmsg_dump.h>
+#include <linux/time.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+
+#define RAMOOPS_KERNMSG_HDR "===="
+#define RAMOOPS_HEADER_SIZE (5 + sizeof(struct timeval))
+
+#define RECORD_SIZE 4096
+
+static ulong mem_address;
+module_param(mem_address, ulong, 0400);
+MODULE_PARM_DESC(mem_address,
+ "start of reserved RAM used to store oops/panic logs");
+
+static ulong mem_size;
+module_param(mem_size, ulong, 0400);
+MODULE_PARM_DESC(mem_size,
+ "size of reserved RAM used to store oops/panic logs");
+
+static int dump_oops = 1;
+module_param(dump_oops, int, 0600);
+MODULE_PARM_DESC(dump_oops,
+ "set to 1 to dump oopses, 0 to only dump panics (default 1)");
+
+static struct ramoops_context {
+ struct kmsg_dumper dump;
+ void *virt_addr;
+ phys_addr_t phys_addr;
+ unsigned long size;
+ int count;
+ int max_count;
+} oops_cxt;
+
+static void ramoops_do_dump(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
+ const char *s2, unsigned long l2)
+{
+ struct ramoops_context *cxt = container_of(dumper,
+ struct ramoops_context, dump);
+ unsigned long s1_start, s2_start;
+ unsigned long l1_cpy, l2_cpy;
+ int res;
+ char *buf;
+ struct timeval timestamp;
+
+ /* Only dump oopses if dump_oops is set */
+ if (reason == KMSG_DUMP_OOPS && !dump_oops)
+ return;
+
+ buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE));
+ memset(buf, '\0', RECORD_SIZE);
+ res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR);
+ buf += res;
+ do_gettimeofday(&timestamp);
+ res = sprintf(buf, "%lu.%lu\n", (long)timestamp.tv_sec, (long)timestamp.tv_usec);
+ buf += res;
+
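+ /*
+ * Fit as much of the two kmsg chunks into one record as possible,
+ * giving priority to the newer chunk (s2) and keeping the tail
+ * (most recent bytes) of each.
+ */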
+ l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE));
+ l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE) - l2_cpy);
+
+ s2_start = l2 - l2_cpy;
+ s1_start = l1 - l1_cpy;
+
+ memcpy(buf, s1 + s1_start, l1_cpy);
+ memcpy(buf + l1_cpy, s2 + s2_start, l2_cpy);
+
+ cxt->count = (cxt->count + 1) % cxt->max_count;
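+ /* Move to the next record slot; the region is used as a ring of records. */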
+}
+
+static int __init ramoops_init(void)
+{
+ struct ramoops_context *cxt = &oops_cxt;
+ int err = -EINVAL;
+
+ if (!mem_size) {
+ printk(KERN_ERR "ramoops: invalid size specification");
+ goto fail3;
+ }
+
+ mem_size = rounddown_pow_of_two(mem_size);
+
+ if (mem_size < RECORD_SIZE) {
+ printk(KERN_ERR "ramoops: size too small");
+ goto fail3;
+ }
+
+ cxt->max_count = mem_size / RECORD_SIZE;
+ cxt->count = 0;
+ cxt->size = mem_size;
+ cxt->phys_addr = mem_address;
+
+ if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) {
+ printk(KERN_ERR "ramoops: request mem region failed");
+ err = -EINVAL;
+ goto fail3;
+ }
+
+ cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size);
+ if (!cxt->virt_addr) {
+ printk(KERN_ERR "ramoops: ioremap failed");
+ goto fail2;
+ }
+
+ cxt->dump.dump = ramoops_do_dump;
+ err = kmsg_dump_register(&cxt->dump);
+ if (err) {
+ printk(KERN_ERR "ramoops: registering kmsg dumper failed");
+ goto fail1;
+ }
+
+ return 0;
+
+fail1:
+ iounmap(cxt->virt_addr);
+fail2:
+ release_mem_region(cxt->phys_addr, cxt->size);
+fail3:
+ return err;
+}
+
+static void __exit ramoops_exit(void)
+{
+ struct ramoops_context *cxt = &oops_cxt;
+
+ if (kmsg_dump_unregister(&cxt->dump) < 0)
+ printk(KERN_WARNING "ramoops: could not unregister kmsg_dumper");
+
+ iounmap(cxt->virt_addr);
+ release_mem_region(cxt->phys_addr, cxt->size);
+}
+
+
+module_init(ramoops_init);
+module_exit(ramoops_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>");
+MODULE_DESCRIPTION("RAM Oops/Panic logger/driver");
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 2fd3d39995d5..8d85587b6d4f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -257,6 +257,7 @@
#define INPUT_POOL_WORDS 128
#define OUTPUT_POOL_WORDS 32
#define SEC_XFER_SIZE 512
+#define EXTRACT_SIZE 10
/*
* The minimum number of bits of entropy before we wake up a read on
@@ -414,7 +415,7 @@ struct entropy_store {
unsigned add_ptr;
int entropy_count;
int input_rotate;
- __u8 *last_data;
+ __u8 last_data[EXTRACT_SIZE];
};
static __u32 input_pool_data[INPUT_POOL_WORDS];
@@ -714,8 +715,6 @@ void add_disk_randomness(struct gendisk *disk)
}
#endif
-#define EXTRACT_SIZE 10
-
/*********************************************************************
*
* Entropy extraction routines
@@ -862,7 +861,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
while (nbytes) {
extract_buf(r, tmp);
- if (r->last_data) {
+ if (fips_enabled) {
spin_lock_irqsave(&r->lock, flags);
if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
panic("Hardware RNG duplicated output!\n");
@@ -951,9 +950,6 @@ static void init_std_data(struct entropy_store *r)
now = ktime_get_real();
mix_pool_bytes(r, &now, sizeof(now));
mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
- /* Enable continuous test in fips mode */
- if (fips_enabled)
- r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL);
}
static int rand_initialize(void)
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 8756ab0daa8b..b38942f6bf31 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -121,13 +121,17 @@ static int raw_release(struct inode *inode, struct file *filp)
/*
* Forward ioctls to the underlying block device.
*/
-static int
-raw_ioctl(struct inode *inode, struct file *filp,
- unsigned int command, unsigned long arg)
+static long
+raw_ioctl(struct file *filp, unsigned int command, unsigned long arg)
{
struct block_device *bdev = filp->private_data;
+ int ret;
+
+ lock_kernel();
+ ret = blkdev_ioctl(bdev, 0, command, arg);
+ unlock_kernel();
- return blkdev_ioctl(bdev, 0, command, arg);
+ return ret;
}
static void bind_device(struct raw_config_request *rq)
@@ -141,13 +145,14 @@ static void bind_device(struct raw_config_request *rq)
* Deal with ioctls against the raw-device control interface, to bind
* and unbind other raw devices.
*/
-static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
- unsigned int command, unsigned long arg)
+static long raw_ctl_ioctl(struct file *filp, unsigned int command,
+ unsigned long arg)
{
struct raw_config_request rq;
struct raw_device_data *rawdev;
int err = 0;
+ lock_kernel();
switch (command) {
case RAW_SETBIND:
case RAW_GETBIND:
@@ -240,25 +245,26 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
break;
}
out:
+ unlock_kernel();
return err;
}
static const struct file_operations raw_fops = {
- .read = do_sync_read,
- .aio_read = generic_file_aio_read,
- .write = do_sync_write,
- .aio_write = blkdev_aio_write,
- .fsync = blkdev_fsync,
- .open = raw_open,
- .release= raw_release,
- .ioctl = raw_ioctl,
- .owner = THIS_MODULE,
+ .read = do_sync_read,
+ .aio_read = generic_file_aio_read,
+ .write = do_sync_write,
+ .aio_write = blkdev_aio_write,
+ .fsync = blkdev_fsync,
+ .open = raw_open,
+ .release = raw_release,
+ .unlocked_ioctl = raw_ioctl,
+ .owner = THIS_MODULE,
};
static const struct file_operations raw_ctl_fops = {
- .ioctl = raw_ctl_ioctl,
- .open = raw_open,
- .owner = THIS_MODULE,
+ .unlocked_ioctl = raw_ctl_ioctl,
+ .open = raw_open,
+ .owner = THIS_MODULE,
};
static struct cdev raw_cdev;
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index 1144a04cda6e..42f7fa442ff8 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -866,7 +866,7 @@ static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
int i = vdev->unit_address;
int j;
- struct device_node *node = vdev->dev.archdata.of_node;
+ struct device_node *node = vdev->dev.of_node;
if (i >= VIOTAPE_MAX_TAPE)
return -ENODEV;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 196428c2287a..942a9826bd23 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -33,35 +33,6 @@
#include <linux/workqueue.h>
#include "hvc_console.h"
-/* Moved here from .h file in order to disable MULTIPORT. */
-#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */
-
-struct virtio_console_multiport_conf {
- struct virtio_console_config config;
- /* max. number of ports this device can hold */
- __u32 max_nr_ports;
- /* number of ports added so far */
- __u32 nr_ports;
-} __attribute__((packed));
-
-/*
- * A message that's passed between the Host and the Guest for a
- * particular port.
- */
-struct virtio_console_control {
- __u32 id; /* Port number */
- __u16 event; /* The kind of control event (see below) */
- __u16 value; /* Extra information for the key */
-};
-
-/* Some events for control messages */
-#define VIRTIO_CONSOLE_PORT_READY 0
-#define VIRTIO_CONSOLE_CONSOLE_PORT 1
-#define VIRTIO_CONSOLE_RESIZE 2
-#define VIRTIO_CONSOLE_PORT_OPEN 3
-#define VIRTIO_CONSOLE_PORT_NAME 4
-#define VIRTIO_CONSOLE_PORT_REMOVE 5
-
/*
* This is a global struct for storing common data for all the devices
* this driver handles.
@@ -107,6 +78,9 @@ struct console {
/* The hvc device associated with this console port */
struct hvc_struct *hvc;
+ /* The size of the console */
+ struct winsize ws;
+
/*
* This number identifies the number that we used to register
* with hvc in hvc_instantiate() and hvc_alloc(); this is the
@@ -139,7 +113,6 @@ struct ports_device {
* notification
*/
struct work_struct control_work;
- struct work_struct config_work;
struct list_head ports;
@@ -150,7 +123,7 @@ struct ports_device {
spinlock_t cvq_lock;
/* The current config space is stored here */
- struct virtio_console_multiport_conf config;
+ struct virtio_console_config config;
/* The virtio device we're associated with */
struct virtio_device *vdev;
@@ -189,6 +162,9 @@ struct port {
*/
spinlock_t inbuf_lock;
+ /* Protect the operations on the out_vq. */
+ spinlock_t outvq_lock;
+
/* The IO vqs for this port */
struct virtqueue *in_vq, *out_vq;
@@ -214,6 +190,8 @@ struct port {
/* The 'id' to identify the port with the Host */
u32 id;
+ bool outvq_full;
+
/* Is the host device open */
bool host_connected;
@@ -328,7 +306,7 @@ static void *get_inbuf(struct port *port)
unsigned int len;
vq = port->in_vq;
- buf = vq->vq_ops->get_buf(vq, &len);
+ buf = virtqueue_get_buf(vq, &len);
if (buf) {
buf->len = len;
buf->offset = 0;
@@ -349,8 +327,8 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
sg_init_one(sg, buf->buf, buf->size);
- ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf);
- vq->vq_ops->kick(vq);
+ ret = virtqueue_add_buf(vq, sg, 0, 1, buf);
+ virtqueue_kick(vq);
return ret;
}
@@ -366,7 +344,7 @@ static void discard_port_data(struct port *port)
if (port->inbuf)
buf = port->inbuf;
else
- buf = vq->vq_ops->get_buf(vq, &len);
+ buf = virtqueue_get_buf(vq, &len);
ret = 0;
while (buf) {
@@ -374,7 +352,7 @@ static void discard_port_data(struct port *port)
ret++;
free_buf(buf);
}
- buf = vq->vq_ops->get_buf(vq, &len);
+ buf = virtqueue_get_buf(vq, &len);
}
port->inbuf = NULL;
if (ret)
@@ -403,57 +381,96 @@ out:
return ret;
}
-static ssize_t send_control_msg(struct port *port, unsigned int event,
- unsigned int value)
+static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
+ unsigned int event, unsigned int value)
{
struct scatterlist sg[1];
struct virtio_console_control cpkt;
struct virtqueue *vq;
unsigned int len;
- if (!use_multiport(port->portdev))
+ if (!use_multiport(portdev))
return 0;
- cpkt.id = port->id;
+ cpkt.id = port_id;
cpkt.event = event;
cpkt.value = value;
- vq = port->portdev->c_ovq;
+ vq = portdev->c_ovq;
sg_init_one(sg, &cpkt, sizeof(cpkt));
- if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
- vq->vq_ops->kick(vq);
- while (!vq->vq_ops->get_buf(vq, &len))
+ if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
+ virtqueue_kick(vq);
+ while (!virtqueue_get_buf(vq, &len))
cpu_relax();
}
return 0;
}
-static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count)
+static ssize_t send_control_msg(struct port *port, unsigned int event,
+ unsigned int value)
+{
+ return __send_control_msg(port->portdev, port->id, event, value);
+}
+
+/* Callers must take the port->outvq_lock */
+static void reclaim_consumed_buffers(struct port *port)
+{
+ void *buf;
+ unsigned int len;
+
+ while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
+ kfree(buf);
+ port->outvq_full = false;
+ }
+}
+
+static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
+ bool nonblock)
{
struct scatterlist sg[1];
struct virtqueue *out_vq;
ssize_t ret;
+ unsigned long flags;
unsigned int len;
out_vq = port->out_vq;
+ spin_lock_irqsave(&port->outvq_lock, flags);
+
+ reclaim_consumed_buffers(port);
+
sg_init_one(sg, in_buf, in_count);
- ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf);
+ ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf);
/* Tell Host to go! */
- out_vq->vq_ops->kick(out_vq);
+ virtqueue_kick(out_vq);
if (ret < 0) {
in_count = 0;
- goto fail;
+ goto done;
}
- /* Wait till the host acknowledges it pushed out the data we sent. */
- while (!out_vq->vq_ops->get_buf(out_vq, &len))
+ if (ret == 0)
+ port->outvq_full = true;
+
+ if (nonblock)
+ goto done;
+
+ /*
+ * Wait till the host acknowledges it pushed out the data we
+ * sent. This is done for ports in blocking mode or for data
+ * from the hvc_console; the tty operations are performed with
+ * spinlocks held so we can't sleep here.
+ */
+ while (!virtqueue_get_buf(out_vq, &len))
cpu_relax();
-fail:
- /* We're expected to return the amount of data we wrote */
+done:
+ spin_unlock_irqrestore(&port->outvq_lock, flags);
+ /*
+ * We're expected to return the amount of data we wrote -- all
+ * of it
+ */
return in_count;
}
@@ -503,9 +520,32 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
}
/* The condition that must be true for polling to end */
-static bool wait_is_over(struct port *port)
+static bool will_read_block(struct port *port)
{
- return port_has_data(port) || !port->host_connected;
+ return !port_has_data(port) && port->host_connected;
+}
+
+static bool will_write_block(struct port *port)
+{
+ bool ret;
+
+ if (!port->guest_connected) {
+ /* Port got hot-unplugged. Let's exit. */
+ return false;
+ }
+ if (!port->host_connected)
+ return true;
+
+ spin_lock_irq(&port->outvq_lock);
+ /*
+ * Check if the Host has consumed any buffers since we last
+ * sent data (this is only applicable for nonblocking ports).
+ */
+ reclaim_consumed_buffers(port);
+ ret = port->outvq_full;
+ spin_unlock_irq(&port->outvq_lock);
+
+ return ret;
}
static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
@@ -528,7 +568,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
return -EAGAIN;
ret = wait_event_interruptible(port->waitqueue,
- wait_is_over(port));
+ !will_read_block(port));
if (ret < 0)
return ret;
}
@@ -554,9 +594,22 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
struct port *port;
char *buf;
ssize_t ret;
+ bool nonblock;
port = filp->private_data;
+ nonblock = filp->f_flags & O_NONBLOCK;
+
+ if (will_write_block(port)) {
+ if (nonblock)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(port->waitqueue,
+ !will_write_block(port));
+ if (ret < 0)
+ return ret;
+ }
+
count = min((size_t)(32 * 1024), count);
buf = kmalloc(count, GFP_KERNEL);
@@ -569,9 +622,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
goto free_buf;
}
- ret = send_buf(port, buf, count);
+ ret = send_buf(port, buf, count, nonblock);
+
+ if (nonblock && ret > 0)
+ goto out;
+
free_buf:
kfree(buf);
+out:
return ret;
}
@@ -586,7 +644,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
ret = 0;
if (port->inbuf)
ret |= POLLIN | POLLRDNORM;
- if (port->host_connected)
+ if (!will_write_block(port))
ret |= POLLOUT;
if (!port->host_connected)
ret |= POLLHUP;
@@ -610,6 +668,10 @@ static int port_fops_release(struct inode *inode, struct file *filp)
spin_unlock_irq(&port->inbuf_lock);
+ spin_lock_irq(&port->outvq_lock);
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
+
return 0;
}
@@ -638,6 +700,15 @@ static int port_fops_open(struct inode *inode, struct file *filp)
port->guest_connected = true;
spin_unlock_irq(&port->inbuf_lock);
+ spin_lock_irq(&port->outvq_lock);
+ /*
+ * We may have missed reclaiming a few buffers in the window
+ * between the port being closed earlier and being opened now.
+ */
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
+
/* Notify host of port being opened */
send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
@@ -676,9 +747,9 @@ static int put_chars(u32 vtermno, const char *buf, int count)
port = find_port_by_vtermno(vtermno);
if (!port)
- return 0;
+ return -EPIPE;
- return send_buf(port, (void *)buf, count);
+ return send_buf(port, (void *)buf, count, false);
}
/*
@@ -692,9 +763,13 @@ static int get_chars(u32 vtermno, char *buf, int count)
{
struct port *port;
+ /* If we've not set up the port yet, we have no input to give. */
+ if (unlikely(early_put_chars))
+ return 0;
+
port = find_port_by_vtermno(vtermno);
if (!port)
- return 0;
+ return -EPIPE;
/* If we don't have an input queue yet, we can't get input. */
BUG_ON(!port->in_vq);
@@ -705,22 +780,14 @@ static int get_chars(u32 vtermno, char *buf, int count)
static void resize_console(struct port *port)
{
struct virtio_device *vdev;
- struct winsize ws;
/* The port could have been hot-unplugged */
- if (!port)
+ if (!port || !is_console_port(port))
return;
vdev = port->portdev->vdev;
- if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) {
- vdev->config->get(vdev,
- offsetof(struct virtio_console_config, cols),
- &ws.ws_col, sizeof(u16));
- vdev->config->get(vdev,
- offsetof(struct virtio_console_config, rows),
- &ws.ws_row, sizeof(u16));
- hvc_resize(port->cons.hvc, ws);
- }
+ if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
+ hvc_resize(port->cons.hvc, port->cons.ws);
}
/* We set the configuration at this point, since we now have a tty */
@@ -804,6 +871,13 @@ int init_port_console(struct port *port)
spin_unlock_irq(&pdrvdata_lock);
port->guest_connected = true;
+ /*
+ * Start using the new console output if this is the first
+ * console to come up.
+ */
+ if (early_put_chars)
+ early_put_chars = NULL;
+
/* Notify host of port being opened */
send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
@@ -859,6 +933,8 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"host_connected: %d\n", port->host_connected);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "outvq_full: %d\n", port->outvq_full);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
"is_console: %s\n",
is_console_port(port) ? "yes" : "no");
out_offset += snprintf(buf + out_offset, out_count - out_offset,
@@ -875,11 +951,165 @@ static const struct file_operations port_debugfs_ops = {
.read = debugfs_read,
};
+static void set_console_size(struct port *port, u16 rows, u16 cols)
+{
+ if (!port || !is_console_port(port))
+ return;
+
+ port->cons.ws.ws_row = rows;
+ port->cons.ws.ws_col = cols;
+}
+
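+/*
+ * Post page-sized receive buffers on the virtqueue until allocation
+ * or add_inbuf() fails; returns the number of buffers queued.
+ */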
+static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+{
+ struct port_buffer *buf;
+ unsigned int nr_added_bufs;
+ int ret;
+
+ nr_added_bufs = 0;
+ do {
+ buf = alloc_buf(PAGE_SIZE);
+ if (!buf)
+ break;
+
+ spin_lock_irq(lock);
+ ret = add_inbuf(vq, buf);
+ if (ret < 0) {
+ spin_unlock_irq(lock);
+ free_buf(buf);
+ break;
+ }
+ nr_added_bufs++;
+ spin_unlock_irq(lock);
+ } while (ret > 0);
+
+ return nr_added_bufs;
+}
+
+static int add_port(struct ports_device *portdev, u32 id)
+{
+ char debugfs_name[16];
+ struct port *port;
+ struct port_buffer *buf;
+ dev_t devt;
+ unsigned int nr_added_bufs;
+ int err;
+
+ port = kmalloc(sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ port->portdev = portdev;
+ port->id = id;
+
+ port->name = NULL;
+ port->inbuf = NULL;
+ port->cons.hvc = NULL;
+
+ port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
+
+ port->host_connected = port->guest_connected = false;
+
+ port->outvq_full = false;
+
+ port->in_vq = portdev->in_vqs[port->id];
+ port->out_vq = portdev->out_vqs[port->id];
+
+ cdev_init(&port->cdev, &port_fops);
+
+ devt = MKDEV(portdev->chr_major, id);
+ err = cdev_add(&port->cdev, devt, 1);
+ if (err < 0) {
+ dev_err(&port->portdev->vdev->dev,
+ "Error %d adding cdev for port %u\n", err, id);
+ goto free_port;
+ }
+ port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
+ devt, port, "vport%up%u",
+ port->portdev->drv_index, id);
+ if (IS_ERR(port->dev)) {
+ err = PTR_ERR(port->dev);
+ dev_err(&port->portdev->vdev->dev,
+ "Error %d creating device for port %u\n",
+ err, id);
+ goto free_cdev;
+ }
+
+ spin_lock_init(&port->inbuf_lock);
+ spin_lock_init(&port->outvq_lock);
+ init_waitqueue_head(&port->waitqueue);
+
+ /* Fill the in_vq with buffers so the host can send us data. */
+ nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
+ if (!nr_added_bufs) {
+ dev_err(port->dev, "Error allocating inbufs\n");
+ err = -ENOMEM;
+ goto free_device;
+ }
+
+ /*
+ * If we're not using multiport support, this has to be a console port
+ */
+ if (!use_multiport(port->portdev)) {
+ err = init_port_console(port);
+ if (err)
+ goto free_inbufs;
+ }
+
+ spin_lock_irq(&portdev->ports_lock);
+ list_add_tail(&port->list, &port->portdev->ports);
+ spin_unlock_irq(&portdev->ports_lock);
+
+ /*
+ * Tell the Host we're set so that it can send us various
+ * configuration parameters for this port (eg, port name,
+ * caching, whether this is a console port, etc.)
+ */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+
+ if (pdrvdata.debugfs_dir) {
+ /*
+ * Finally, create the debugfs file that we can use to
+ * inspect a port's state at any time
+ */
+ sprintf(debugfs_name, "vport%up%u",
+ port->portdev->drv_index, id);
+ port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
+ pdrvdata.debugfs_dir,
+ port,
+ &port_debugfs_ops);
+ }
+ return 0;
+
+free_inbufs:
+ while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
+ free_buf(buf);
+free_device:
+ device_destroy(pdrvdata.class, port->dev->devt);
+free_cdev:
+ cdev_del(&port->cdev);
+free_port:
+ kfree(port);
+fail:
+ /* The host might want to notify management sw about port add failure */
+ __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
+ return err;
+}
+
/* Remove all port-specific data. */
static int remove_port(struct port *port)
{
struct port_buffer *buf;
+ if (port->guest_connected) {
+ port->guest_connected = false;
+ port->host_connected = false;
+ wake_up_interruptible(&port->waitqueue);
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
+ }
+
spin_lock_irq(&port->portdev->ports_lock);
list_del(&port->list);
spin_unlock_irq(&port->portdev->ports_lock);
@@ -888,11 +1118,19 @@ static int remove_port(struct port *port)
spin_lock_irq(&pdrvdata_lock);
list_del(&port->cons.list);
spin_unlock_irq(&pdrvdata_lock);
+#if 0
+ /*
+ * hvc_remove() not called as removing one hvc port
+ * results in other hvc ports getting frozen.
+ *
+ * Once this is resolved in hvc, this functionality
+ * will be enabled. Till that is done, the -EPIPE
+ * return from get_chars() above will help
+ * hvc_console.c to clean up on ports we remove here.
+ */
hvc_remove(port->cons.hvc);
+#endif
}
- if (port->guest_connected)
- send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
-
sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
device_destroy(pdrvdata.class, port->dev->devt);
cdev_del(&port->cdev);
@@ -900,8 +1138,10 @@ static int remove_port(struct port *port)
/* Remove unused data this port might have received. */
discard_port_data(port);
+ reclaim_consumed_buffers(port);
+
/* Remove buffers we queued up for the Host to send us data in. */
- while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq)))
+ while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
free_buf(buf);
kfree(port->name);
@@ -924,7 +1164,7 @@ static void handle_control_message(struct ports_device *portdev,
cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
port = find_port_by_id(portdev, cpkt->id);
- if (!port) {
+ if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) {
/* No valid header at start of buffer. Drop it. */
dev_dbg(&portdev->vdev->dev,
"Invalid index %u in control packet\n", cpkt->id);
@@ -932,6 +1172,24 @@ static void handle_control_message(struct ports_device *portdev,
}
switch (cpkt->event) {
+ case VIRTIO_CONSOLE_PORT_ADD:
+ if (port) {
+ dev_dbg(&portdev->vdev->dev,
+ "Port %u already added\n", port->id);
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+ break;
+ }
+ if (cpkt->id >= portdev->config.max_nr_ports) {
+ dev_warn(&portdev->vdev->dev,
+ "Request for adding port with out-of-bound id %u, max. supported id: %u\n",
+ cpkt->id, portdev->config.max_nr_ports - 1);
+ break;
+ }
+ add_port(portdev, cpkt->id);
+ break;
+ case VIRTIO_CONSOLE_PORT_REMOVE:
+ remove_port(port);
+ break;
case VIRTIO_CONSOLE_CONSOLE_PORT:
if (!cpkt->value)
break;
@@ -944,15 +1202,34 @@ static void handle_control_message(struct ports_device *portdev,
* have to notify the host first.
*/
break;
- case VIRTIO_CONSOLE_RESIZE:
+ case VIRTIO_CONSOLE_RESIZE: {
+ struct {
+ __u16 rows;
+ __u16 cols;
+ } size;
+
if (!is_console_port(port))
break;
+
+ memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
+ sizeof(size));
+ set_console_size(port, size.rows, size.cols);
+
port->cons.hvc->irq_requested = 1;
resize_console(port);
break;
+ }
case VIRTIO_CONSOLE_PORT_OPEN:
port->host_connected = cpkt->value;
wake_up_interruptible(&port->waitqueue);
+ /*
+ * If the host port got closed and the host had any
+ * unconsumed buffers, we'll be able to reclaim them
+ * now.
+ */
+ spin_lock_irq(&port->outvq_lock);
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
break;
case VIRTIO_CONSOLE_PORT_NAME:
/*
@@ -990,32 +1267,6 @@ static void handle_control_message(struct ports_device *portdev,
kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
}
break;
- case VIRTIO_CONSOLE_PORT_REMOVE:
- /*
- * Hot unplug the port. We don't decrement nr_ports
- * since we don't want to deal with extra complexities
- * of using the lowest-available port id: We can just
- * pick up the nr_ports number as the id and not have
- * userspace send it to us. This helps us in two
- * ways:
- *
- * - We don't need to have a 'port_id' field in the
- * config space when a port is hot-added. This is a
- * good thing as we might queue up multiple hotplug
- * requests issued in our workqueue.
- *
- * - Another way to deal with this would have been to
- * use a bitmap of the active ports and select the
- * lowest non-active port from that map. That
- * bloats the already tight config space and we
- * would end up artificially limiting the
- * max. number of ports to sizeof(bitmap). Right
- * now we can support 2^32 ports (as the port id is
- * stored in a u32 type).
- *
- */
- remove_port(port);
- break;
}
}
@@ -1030,7 +1281,7 @@ static void control_work_handler(struct work_struct *work)
vq = portdev->c_ivq;
spin_lock(&portdev->cvq_lock);
- while ((buf = vq->vq_ops->get_buf(vq, &len))) {
+ while ((buf = virtqueue_get_buf(vq, &len))) {
spin_unlock(&portdev->cvq_lock);
buf->len = len;
@@ -1092,204 +1343,29 @@ static void config_intr(struct virtio_device *vdev)
struct ports_device *portdev;
portdev = vdev->priv;
- if (use_multiport(portdev)) {
- /* Handle port hot-add */
- schedule_work(&portdev->config_work);
- }
- /*
- * We'll use this way of resizing only for legacy support.
- * For newer userspace (VIRTIO_CONSOLE_F_MULTPORT+), use
- * control messages to indicate console size changes so that
- * it can be done per-port
- */
- resize_console(find_port_by_id(portdev, 0));
-}
-
-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
-{
- struct port_buffer *buf;
- unsigned int nr_added_bufs;
- int ret;
-
- nr_added_bufs = 0;
- do {
- buf = alloc_buf(PAGE_SIZE);
- if (!buf)
- break;
-
- spin_lock_irq(lock);
- ret = add_inbuf(vq, buf);
- if (ret < 0) {
- spin_unlock_irq(lock);
- free_buf(buf);
- break;
- }
- nr_added_bufs++;
- spin_unlock_irq(lock);
- } while (ret > 0);
-
- return nr_added_bufs;
-}
-
-static int add_port(struct ports_device *portdev, u32 id)
-{
- char debugfs_name[16];
- struct port *port;
- struct port_buffer *buf;
- dev_t devt;
- unsigned int nr_added_bufs;
- int err;
-
- port = kmalloc(sizeof(*port), GFP_KERNEL);
- if (!port) {
- err = -ENOMEM;
- goto fail;
- }
-
- port->portdev = portdev;
- port->id = id;
-
- port->name = NULL;
- port->inbuf = NULL;
- port->cons.hvc = NULL;
-
- port->host_connected = port->guest_connected = false;
-
- port->in_vq = portdev->in_vqs[port->id];
- port->out_vq = portdev->out_vqs[port->id];
-
- cdev_init(&port->cdev, &port_fops);
-
- devt = MKDEV(portdev->chr_major, id);
- err = cdev_add(&port->cdev, devt, 1);
- if (err < 0) {
- dev_err(&port->portdev->vdev->dev,
- "Error %d adding cdev for port %u\n", err, id);
- goto free_port;
- }
- port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
- devt, port, "vport%up%u",
- port->portdev->drv_index, id);
- if (IS_ERR(port->dev)) {
- err = PTR_ERR(port->dev);
- dev_err(&port->portdev->vdev->dev,
- "Error %d creating device for port %u\n",
- err, id);
- goto free_cdev;
- }
-
- spin_lock_init(&port->inbuf_lock);
- init_waitqueue_head(&port->waitqueue);
-
- /* Fill the in_vq with buffers so the host can send us data. */
- nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
- if (!nr_added_bufs) {
- dev_err(port->dev, "Error allocating inbufs\n");
- err = -ENOMEM;
- goto free_device;
- }
-
- /*
- * If we're not using multiport support, this has to be a console port
- */
- if (!use_multiport(port->portdev)) {
- err = init_port_console(port);
- if (err)
- goto free_inbufs;
- }
-
- spin_lock_irq(&portdev->ports_lock);
- list_add_tail(&port->list, &port->portdev->ports);
- spin_unlock_irq(&portdev->ports_lock);
-
- /*
- * Tell the Host we're set so that it can send us various
- * configuration parameters for this port (eg, port name,
- * caching, whether this is a console port, etc.)
- */
- send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
-
- if (pdrvdata.debugfs_dir) {
- /*
- * Finally, create the debugfs file that we can use to
- * inspect a port's state at any time
- */
- sprintf(debugfs_name, "vport%up%u",
- port->portdev->drv_index, id);
- port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
- pdrvdata.debugfs_dir,
- port,
- &port_debugfs_ops);
- }
- return 0;
-free_inbufs:
- while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq)))
- free_buf(buf);
-free_device:
- device_destroy(pdrvdata.class, port->dev->devt);
-free_cdev:
- cdev_del(&port->cdev);
-free_port:
- kfree(port);
-fail:
- return err;
-}
-
-/*
- * The workhandler for config-space updates.
- *
- * This is called when ports are hot-added.
- */
-static void config_work_handler(struct work_struct *work)
-{
- struct virtio_console_multiport_conf virtconconf;
- struct ports_device *portdev;
- struct virtio_device *vdev;
- int err;
+ if (!use_multiport(portdev)) {
+ struct port *port;
+ u16 rows, cols;
- portdev = container_of(work, struct ports_device, config_work);
+ vdev->config->get(vdev,
+ offsetof(struct virtio_console_config, cols),
+ &cols, sizeof(u16));
+ vdev->config->get(vdev,
+ offsetof(struct virtio_console_config, rows),
+ &rows, sizeof(u16));
- vdev = portdev->vdev;
- vdev->config->get(vdev,
- offsetof(struct virtio_console_multiport_conf,
- nr_ports),
- &virtconconf.nr_ports,
- sizeof(virtconconf.nr_ports));
+ port = find_port_by_id(portdev, 0);
+ set_console_size(port, rows, cols);
- if (portdev->config.nr_ports == virtconconf.nr_ports) {
/*
- * Port 0 got hot-added. Since we already did all the
- * other initialisation for it, just tell the Host
- * that the port is ready if we find the port. In
- * case the port was hot-removed earlier, we call
- * add_port to add the port.
+ * We'll use this way of resizing only for legacy
+ * support. For newer userspace
+ * (VIRTIO_CONSOLE_F_MULTIPORT+), use control messages
+ * to indicate console size changes so that it can be
+ * done per-port.
*/
- struct port *port;
-
- port = find_port_by_id(portdev, 0);
- if (!port)
- add_port(portdev, 0);
- else
- send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
- return;
- }
- if (virtconconf.nr_ports > portdev->config.max_nr_ports) {
- dev_warn(&vdev->dev,
- "More ports specified (%u) than allowed (%u)",
- portdev->config.nr_ports + 1,
- portdev->config.max_nr_ports);
- return;
- }
- if (virtconconf.nr_ports < portdev->config.nr_ports)
- return;
-
- /* Hot-add ports */
- while (virtconconf.nr_ports - portdev->config.nr_ports) {
- err = add_port(portdev, portdev->config.nr_ports);
- if (err)
- break;
- portdev->config.nr_ports++;
+ resize_console(port);
}
}
@@ -1414,7 +1490,6 @@ static const struct file_operations portdev_fops = {
static int __devinit virtcons_probe(struct virtio_device *vdev)
{
struct ports_device *portdev;
- u32 i;
int err;
bool multiport;
@@ -1443,37 +1518,19 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
}
multiport = false;
- portdev->config.nr_ports = 1;
portdev->config.max_nr_ports = 1;
-#if 0 /* Multiport is not quite ready yet --RR */
if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
multiport = true;
vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT;
- vdev->config->get(vdev,
- offsetof(struct virtio_console_multiport_conf,
- nr_ports),
- &portdev->config.nr_ports,
- sizeof(portdev->config.nr_ports));
- vdev->config->get(vdev,
- offsetof(struct virtio_console_multiport_conf,
- max_nr_ports),
+ vdev->config->get(vdev, offsetof(struct virtio_console_config,
+ max_nr_ports),
&portdev->config.max_nr_ports,
sizeof(portdev->config.max_nr_ports));
- if (portdev->config.nr_ports > portdev->config.max_nr_ports) {
- dev_warn(&vdev->dev,
- "More ports (%u) specified than allowed (%u). Will init %u ports.",
- portdev->config.nr_ports,
- portdev->config.max_nr_ports,
- portdev->config.max_nr_ports);
-
- portdev->config.nr_ports = portdev->config.max_nr_ports;
- }
}
/* Let the Host know we support multiple ports.*/
vdev->config->finalize_features(vdev);
-#endif
err = init_vqs(portdev);
if (err < 0) {
@@ -1489,7 +1546,6 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
spin_lock_init(&portdev->cvq_lock);
INIT_WORK(&portdev->control_work, &control_work_handler);
- INIT_WORK(&portdev->config_work, &config_work_handler);
nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
if (!nr_added_bufs) {
@@ -1498,16 +1554,22 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
err = -ENOMEM;
goto free_vqs;
}
+ } else {
+ /*
+ * For backward compatibility: Create a console port
+ * if we're running on older host.
+ */
+ add_port(portdev, 0);
}
- for (i = 0; i < portdev->config.nr_ports; i++)
- add_port(portdev, i);
-
- /* Start using the new console output. */
- early_put_chars = NULL;
+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+ VIRTIO_CONSOLE_DEVICE_READY, 1);
return 0;
free_vqs:
+ /* The host might want to notify mgmt sw about device add failure */
+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+ VIRTIO_CONSOLE_DEVICE_READY, 0);
vdev->config->del_vqs(vdev);
kfree(portdev->in_vqs);
kfree(portdev->out_vqs);
@@ -1529,17 +1591,16 @@ static void virtcons_remove(struct virtio_device *vdev)
portdev = vdev->priv;
cancel_work_sync(&portdev->control_work);
- cancel_work_sync(&portdev->config_work);
list_for_each_entry_safe(port, port2, &portdev->ports, list)
remove_port(port);
unregister_chrdev(portdev->chr_major, "virtio-portsdev");
- while ((buf = portdev->c_ivq->vq_ops->get_buf(portdev->c_ivq, &len)))
+ while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
free_buf(buf);
- while ((buf = portdev->c_ivq->vq_ops->detach_unused_buf(portdev->c_ivq)))
+ while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
free_buf(buf);
vdev->config->del_vqs(vdev);
@@ -1556,6 +1617,7 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_CONSOLE_F_SIZE,
+ VIRTIO_CONSOLE_F_MULTIPORT,
};
static struct virtio_driver virtio_console = {
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index bd1d1164fec5..7cdb6ee569cd 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -3967,13 +3967,9 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
font.charcount = op->charcount;
font.height = op->height;
font.width = op->width;
- font.data = kmalloc(size, GFP_KERNEL);
- if (!font.data)
- return -ENOMEM;
- if (copy_from_user(font.data, op->data, size)) {
- kfree(font.data);
- return -EFAULT;
- }
+ font.data = memdup_user(op->data, size);
+ if (IS_ERR(font.data))
+ return PTR_ERR(font.data);
acquire_console_sem();
if (vc->vc_sw->con_font_set)
rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
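
The con_font_set() hunk above collapses the kmalloc() + copy_from_user() + error-unwinding sequence into memdup_user(), which returns either a kernel copy of the user buffer or an ERR_PTR-encoded errno. A self-contained user-space sketch of that idiom; ERR_PTR/IS_ERR/PTR_ERR and memdup_user_like() are re-implemented here purely for illustration (in the kernel they come from <linux/err.h> and <linux/string.h>):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* stand-in for memdup_user(): the "user" buffer here is ordinary memory */
static void *memdup_user_like(const void *src, size_t len)
{
    void *p = malloc(len);

    if (!p)
        return ERR_PTR(-ENOMEM);
    memcpy(p, src, len);
    return p;
}

int main(void)
{
    const char msg[] = "font data";
    void *copy = memdup_user_like(msg, sizeof(msg));

    if (IS_ERR(copy))
        return (int)-PTR_ERR(copy);     /* single failure path for the caller */
    printf("%s\n", (char *)copy);
    free(copy);
    return 0;
}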
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 6aa10284104a..cb19dbc52136 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -1303,7 +1303,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
if (!perm)
goto eperm;
ret = copy_from_user(&ui, up, sizeof(struct unimapinit));
- if (!ret)
+ if (ret)
+ ret = -EFAULT;
+ else
con_clear_unimap(vc, &ui);
break;
}
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 7261b8d9087c..ed8a9cec2a05 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -772,18 +772,18 @@ hwicap_of_probe(struct of_device *op, const struct of_device_id *match)
dev_dbg(&op->dev, "hwicap_of_probe(%p, %p)\n", op, match);
- rc = of_address_to_resource(op->node, 0, &res);
+ rc = of_address_to_resource(op->dev.of_node, 0, &res);
if (rc) {
dev_err(&op->dev, "invalid address\n");
return rc;
}
- id = of_get_property(op->node, "port-number", NULL);
+ id = of_get_property(op->dev.of_node, "port-number", NULL);
/* It's most likely that we're using V4, if the family is not
specified */
regs = &v4_config_registers;
- family = of_get_property(op->node, "xlnx,family", NULL);
+ family = of_get_property(op->dev.of_node, "xlnx,family", NULL);
if (family) {
if (!strcmp(family, "virtex2p")) {
@@ -812,13 +812,12 @@ static const struct of_device_id __devinitconst hwicap_of_match[] = {
MODULE_DEVICE_TABLE(of, hwicap_of_match);
static struct of_platform_driver hwicap_of_driver = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
- .match_table = hwicap_of_match,
.probe = hwicap_of_probe,
.remove = __devexit_p(hwicap_of_remove),
.driver = {
.name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = hwicap_of_match,
},
};
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index f6677cb19789..f3d3898898ed 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -412,18 +412,10 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
- int ret;
p->total_cycles = 0;
- ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
- if (ret)
- return ret;
-
- /* TODO: calculate good shift from rate and counter bit width */
- cs->shift = 0;
- cs->mult = clocksource_hz2mult(p->rate, cs->shift);
- return 0;
+ return sh_cmt_start(p, FLAG_CLOCKSOURCE);
}
static void sh_cmt_clocksource_disable(struct clocksource *cs)
@@ -450,8 +442,20 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
cs->resume = sh_cmt_clocksource_resume;
cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ /* clk_get_rate() needs an enabled clock */
+ clk_enable(p->clk);
+ p->rate = clk_get_rate(p->clk) / ((p->width == 16) ? 512 : 8);
+ clk_disable(p->clk);
+
+ /* TODO: calculate good shift from rate and counter bit width */
+ cs->shift = 10;
+ cs->mult = clocksource_hz2mult(p->rate, cs->shift);
+
dev_info(&p->pdev->dev, "used as clock source\n");
+
clocksource_register(cs);
+
return 0;
}
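
Both the sh_cmt and sh_tmu hunks move the mult/shift computation to registration time, reading the rate once while the clock is briefly enabled. clocksource_hz2mult() essentially chooses mult so that nanoseconds = (cycles * mult) >> shift. A standalone sketch with a simplified reimplementation of that helper (the kernel version lives in <linux/clocksource.h>); the 8192 Hz rate below is made up:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* simplified: mult = round((NSEC_PER_SEC << shift) / hz) */
static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
    uint64_t tmp = NSEC_PER_SEC << shift;

    tmp += hz / 2;                      /* round to nearest */
    return (uint32_t)(tmp / hz);
}

int main(void)
{
    uint32_t rate  = 8192;              /* e.g. a 32.768 kHz input prescaled by 4 */
    uint32_t shift = 10;
    uint32_t mult  = hz2mult(rate, shift);
    uint64_t cycles = rate;             /* one second worth of counter ticks */

    printf("mult=%u, 1s of cycles -> %llu ns\n",
           mult, (unsigned long long)((cycles * (uint64_t)mult) >> shift));
    return 0;
}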
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 8e44e14ec4c2..de715901b82a 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -199,16 +199,8 @@ static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
- int ret;
-
- ret = sh_tmu_enable(p);
- if (ret)
- return ret;
- /* TODO: calculate good shift from rate and counter bit width */
- cs->shift = 10;
- cs->mult = clocksource_hz2mult(p->rate, cs->shift);
- return 0;
+ return sh_tmu_enable(p);
}
static void sh_tmu_clocksource_disable(struct clocksource *cs)
@@ -228,6 +220,16 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
cs->disable = sh_tmu_clocksource_disable;
cs->mask = CLOCKSOURCE_MASK(32);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ /* clk_get_rate() needs an enabled clock */
+ clk_enable(p->clk);
+ /* channel will be configured at parent clock / 4 */
+ p->rate = clk_get_rate(p->clk) / 4;
+ clk_disable(p->clk);
+ /* TODO: calculate good shift from rate and counter bit width */
+ cs->shift = 10;
+ cs->mult = clocksource_hz2mult(p->rate, cs->shift);
+
dev_info(&p->pdev->dev, "used as clock source\n");
clocksource_register(cs);
return 0;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 12fdd3987a36..199488576a05 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -156,7 +156,7 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
if (dev->enabled)
return 0;
- if (!cpuidle_curr_driver || !cpuidle_curr_governor)
+ if (!cpuidle_get_driver() || !cpuidle_curr_governor)
return -EIO;
if (!dev->state_count)
return -EINVAL;
@@ -207,7 +207,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
{
if (!dev->enabled)
return;
- if (!cpuidle_curr_driver || !cpuidle_curr_governor)
+ if (!cpuidle_get_driver() || !cpuidle_curr_governor)
return;
dev->enabled = 0;
@@ -271,10 +271,11 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
{
int ret;
struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+ struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
if (!sys_dev)
return -EINVAL;
- if (!try_module_get(cpuidle_curr_driver->owner))
+ if (!try_module_get(cpuidle_driver->owner))
return -EINVAL;
init_completion(&dev->kobj_unregister);
@@ -284,7 +285,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
if ((ret = cpuidle_add_sysfs(sys_dev))) {
- module_put(cpuidle_curr_driver->owner);
+ module_put(cpuidle_driver->owner);
return ret;
}
@@ -325,6 +326,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+ struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
if (dev->registered == 0)
return;
@@ -340,7 +342,7 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
cpuidle_resume_and_unlock();
- module_put(cpuidle_curr_driver->owner);
+ module_put(cpuidle_driver->owner);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 9476ba33ee2c..33e50d556f17 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -9,7 +9,6 @@
/* For internal use only */
extern struct cpuidle_governor *cpuidle_curr_governor;
-extern struct cpuidle_driver *cpuidle_curr_driver;
extern struct list_head cpuidle_governors;
extern struct list_head cpuidle_detected_devices;
extern struct mutex cpuidle_lock;
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 2257004fe33d..fd1601e3d125 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -14,7 +14,7 @@
#include "cpuidle.h"
-struct cpuidle_driver *cpuidle_curr_driver;
+static struct cpuidle_driver *cpuidle_curr_driver;
DEFINE_SPINLOCK(cpuidle_driver_lock);
/**
@@ -40,13 +40,25 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
EXPORT_SYMBOL_GPL(cpuidle_register_driver);
/**
+ * cpuidle_get_driver - return the current driver
+ */
+struct cpuidle_driver *cpuidle_get_driver(void)
+{
+ return cpuidle_curr_driver;
+}
+EXPORT_SYMBOL_GPL(cpuidle_get_driver);
+
+/**
* cpuidle_unregister_driver - unregisters a driver
* @drv: the driver
*/
void cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
- if (!drv)
+ if (drv != cpuidle_curr_driver) {
+ WARN(1, "invalid cpuidle_unregister_driver(%s)\n",
+ drv->name);
return;
+ }
spin_lock(&cpuidle_driver_lock);
cpuidle_curr_driver = NULL;
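
With cpuidle_curr_driver now static to driver.c, every other file reads it through cpuidle_get_driver(), which leaves a single place to add locking or sanity checks later. A trivial standalone sketch of the same accessor pattern; the names here are illustrative, not the kernel's:

#include <stddef.h>
#include <stdio.h>

struct idle_driver {
    const char *name;
};

static struct idle_driver *cur_driver;  /* file-scope, no external linkage */

struct idle_driver *get_driver(void)
{
    return cur_driver;
}

int register_driver(struct idle_driver *drv)
{
    if (cur_driver)
        return -1;                      /* already registered */
    cur_driver = drv;
    return 0;
}

void unregister_driver(struct idle_driver *drv)
{
    if (drv != cur_driver) {
        fprintf(stderr, "invalid unregister of %s\n", drv->name);
        return;
    }
    cur_driver = NULL;
}

int main(void)
{
    struct idle_driver d = { .name = "demo" };

    register_driver(&d);
    printf("current: %s\n", get_driver() ? get_driver()->name : "none");
    unregister_driver(&d);
    return 0;
}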
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index b81ad9c731ae..52ff8aa63f84 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -21,9 +21,12 @@
#include <linux/math64.h>
#define BUCKETS 12
+#define INTERVALS 8
#define RESOLUTION 1024
-#define DECAY 4
+#define DECAY 8
#define MAX_INTERESTING 50000
+#define STDDEV_THRESH 400
+
/*
* Concepts and ideas behind the menu governor
@@ -64,6 +67,16 @@
* indexed based on the magnitude of the expected duration as well as the
* "is IO outstanding" property.
*
+ * Repeatable-interval-detector
+ * ----------------------------
+ * There are some cases where "next timer" is a completely unusable predictor:
+ * Those cases where the interval is fixed, for example due to hardware
+ * interrupt mitigation, but also due to fixed transfer rate devices such as
+ * mice.
+ * For this, we use a different predictor: We track the duration of the last 8
+ * intervals and if the standard deviation of these 8 intervals is below a
+ * threshold value, we use the average of these intervals as prediction.
+ *
* Limiting Performance Impact
* ---------------------------
* C states, especially those with large exit latencies, can have a real
@@ -104,6 +117,8 @@ struct menu_device {
unsigned int exit_us;
unsigned int bucket;
u64 correction_factor[BUCKETS];
+ u32 intervals[INTERVALS];
+ int interval_ptr;
};
@@ -175,6 +190,42 @@ static u64 div_round64(u64 dividend, u32 divisor)
return div_u64(dividend + (divisor / 2), divisor);
}
+/*
+ * Try detecting repeating patterns by keeping track of the last 8
+ * intervals, and checking if the standard deviation of that set
+ * of points is below a threshold. If it is... then use the
+ * average of these 8 points as the estimated value.
+ */
+static void detect_repeating_patterns(struct menu_device *data)
+{
+ int i;
+ uint64_t avg = 0;
+ uint64_t stddev = 0; /* contains the square of the std deviation */
+
+ /* first calculate average and standard deviation of the past */
+ for (i = 0; i < INTERVALS; i++)
+ avg += data->intervals[i];
+ avg = avg / INTERVALS;
+
+ /* if the avg is beyond the known next tick, it's worthless */
+ if (avg > data->expected_us)
+ return;
+
+ for (i = 0; i < INTERVALS; i++)
+ stddev += (data->intervals[i] - avg) *
+ (data->intervals[i] - avg);
+
+ stddev = stddev / INTERVALS;
+
+ /*
+ * now.. if stddev is small.. then assume we have a
+ * repeating pattern and predict we keep doing this.
+ */
+
+ if (avg && stddev < STDDEV_THRESH)
+ data->predicted_us = avg;
+}
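
The detector averages the last INTERVALS (8) observed idle durations and, when the variance (the stddev variable above holds the squared deviation, so STDDEV_THRESH of 400 corresponds to a standard deviation of 20 us) is small enough, uses that average as the prediction. A standalone rework of the same arithmetic with made-up sample values:

#include <stdint.h>
#include <stdio.h>

#define INTERVALS     8
#define STDDEV_THRESH 400               /* variance threshold (std dev of 20 us) */

static uint64_t predict_us(const uint32_t intervals[INTERVALS],
                           uint64_t expected_us, uint64_t fallback_us)
{
    uint64_t avg = 0, variance = 0;
    int i;

    for (i = 0; i < INTERVALS; i++)
        avg += intervals[i];
    avg /= INTERVALS;

    /* an average beyond the next known timer event is worthless */
    if (avg > expected_us)
        return fallback_us;

    for (i = 0; i < INTERVALS; i++) {
        int64_t d = (int64_t)intervals[i] - (int64_t)avg;

        variance += (uint64_t)(d * d);
    }
    variance /= INTERVALS;

    return (avg && variance < STDDEV_THRESH) ? avg : fallback_us;
}

int main(void)
{
    /* e.g. a device interrupting roughly every 125 us */
    uint32_t samples[INTERVALS] = { 124, 126, 125, 125, 123, 127, 125, 125 };

    printf("predicted idle: %llu us\n",
           (unsigned long long)predict_us(samples, 10000, 10000));
    return 0;
}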
+
/**
* menu_select - selects the next idle state to enter
* @dev: the CPU
@@ -218,6 +269,8 @@ static int menu_select(struct cpuidle_device *dev)
data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
RESOLUTION * DECAY);
+ detect_repeating_patterns(data);
+
/*
* We want to default to C1 (hlt), not to busy polling
* unless the timer is happening really really soon.
@@ -310,6 +363,11 @@ static void menu_update(struct cpuidle_device *dev)
new_factor = 1;
data->correction_factor[data->bucket] = new_factor;
+
+ /* update the repeating-pattern data */
+ data->intervals[data->interval_ptr++] = last_idle_us;
+ if (data->interval_ptr >= INTERVALS)
+ data->interval_ptr = 0;
}
/**
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 0ba9c8b8ee74..0310ffaec9df 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -47,10 +47,11 @@ static ssize_t show_current_driver(struct sysdev_class *class,
char *buf)
{
ssize_t ret;
+ struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
spin_lock(&cpuidle_driver_lock);
- if (cpuidle_curr_driver)
- ret = sprintf(buf, "%s\n", cpuidle_curr_driver->name);
+ if (cpuidle_driver)
+ ret = sprintf(buf, "%s\n", cpuidle_driver->name);
else
ret = sprintf(buf, "none\n");
spin_unlock(&cpuidle_driver_lock);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index b08403d7d1ca..fbf94cf496f0 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -170,6 +170,18 @@ config CRYPTO_DEV_MV_CESA
Currently the driver supports AES in ECB and CBC mode without DMA.
+config CRYPTO_DEV_NIAGARA2
+ tristate "Niagara2 Stream Processing Unit driver"
+ select CRYPTO_ALGAPI
+ depends on SPARC64
+ help
+ Each core of a Niagara2 processor contains a Stream
+ Processing Unit, which itself contains several cryptographic
+ sub-units. One set provides the Modular Arithmetic Unit,
+ used for SSL offload. The other set provides the Cipher
+ Group, which can perform encryption, decryption, hashing,
+ checksumming, and raw copies.
+
config CRYPTO_DEV_HIFN_795X
tristate "Driver HIFN 795x crypto accelerator chips"
select CRYPTO_DES
@@ -222,4 +234,13 @@ config CRYPTO_DEV_PPC4XX
help
This option allows you to have support for AMCC crypto acceleration.
+config CRYPTO_DEV_OMAP_SHAM
+ tristate "Support for OMAP SHA1/MD5 hw accelerator"
+ depends on ARCH_OMAP2 || ARCH_OMAP3
+ select CRYPTO_SHA1
+ select CRYPTO_MD5
+ help
+ OMAP processors have SHA1/MD5 hw accelerator. Select this if you
+ want to use the OMAP module for SHA1/MD5 algorithms.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 6ffcb3f7f942..6dbbe00c4524 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,8 +1,12 @@
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
+obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
+n2_crypto-objs := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
+obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 6c4c8b7ce3aa..983530ba04a7 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1158,7 +1158,7 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev;
- rc = of_address_to_resource(ofdev->node, 0, &res);
+ rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (rc)
return -ENODEV;
@@ -1215,13 +1215,13 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
(unsigned long) dev);
/* Register for Crypto isr, Crypto Engine IRQ */
- core_dev->irq = irq_of_parse_and_map(ofdev->node, 0);
+ core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
core_dev->dev->name, dev);
if (rc)
goto err_request_irq;
- core_dev->dev->ce_base = of_iomap(ofdev->node, 0);
+ core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
if (!core_dev->dev->ce_base) {
dev_err(dev, "failed to of_iomap\n");
goto err_iomap;
@@ -1281,8 +1281,11 @@ static const struct of_device_id crypto4xx_match[] = {
};
static struct of_platform_driver crypto4xx_driver = {
- .name = "crypto4xx",
- .match_table = crypto4xx_match,
+ .driver = {
+ .name = "crypto4xx",
+ .owner = THIS_MODULE,
+ .of_match_table = crypto4xx_match,
+ },
.probe = crypto4xx_probe,
.remove = crypto4xx_remove,
};
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index c7a5a43ba691..09389dd2f96b 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -15,14 +15,14 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
-#include <asm/io.h>
-#include <asm/delay.h>
+#include <linux/io.h>
+#include <linux/delay.h>
#include "geode-aes.h"
/* Static structures */
-static void __iomem * _iobase;
+static void __iomem *_iobase;
static spinlock_t lock;
/* Write a 128 bit field (either a writable key or IV) */
@@ -30,7 +30,7 @@ static inline void
_writefield(u32 offset, void *value)
{
int i;
- for(i = 0; i < 4; i++)
+ for (i = 0; i < 4; i++)
iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}
@@ -39,7 +39,7 @@ static inline void
_readfield(u32 offset, void *value)
{
int i;
- for(i = 0; i < 4; i++)
+ for (i = 0; i < 4; i++)
((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}
@@ -59,7 +59,7 @@ do_crypt(void *src, void *dst, int len, u32 flags)
do {
status = ioread32(_iobase + AES_INTR_REG);
cpu_relax();
- } while(!(status & AES_INTRA_PENDING) && --counter);
+ } while (!(status & AES_INTRA_PENDING) && --counter);
/* Clear the event */
iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
@@ -317,7 +317,7 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
op->iv = walk.iv;
- while((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_CBC;
@@ -349,7 +349,7 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
op->iv = walk.iv;
- while((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_CBC;
@@ -429,7 +429,7 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_ECB;
@@ -459,7 +459,7 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_ECB;
@@ -518,11 +518,12 @@ static int __devinit
geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int ret;
-
- if ((ret = pci_enable_device(dev)))
+ ret = pci_enable_device(dev);
+ if (ret)
return ret;
- if ((ret = pci_request_regions(dev, "geode-aes")))
+ ret = pci_request_regions(dev, "geode-aes");
+ if (ret)
goto eenable;
_iobase = pci_iomap(dev, 0, 0);
@@ -537,13 +538,16 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Clear any pending activity */
iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);
- if ((ret = crypto_register_alg(&geode_alg)))
+ ret = crypto_register_alg(&geode_alg);
+ if (ret)
goto eiomap;
- if ((ret = crypto_register_alg(&geode_ecb_alg)))
+ ret = crypto_register_alg(&geode_ecb_alg);
+ if (ret)
goto ealg;
- if ((ret = crypto_register_alg(&geode_cbc_alg)))
+ ret = crypto_register_alg(&geode_cbc_alg);
+ if (ret)
goto eecb;
printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 73e8b1713b54..16fce3aadf4d 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -638,7 +638,7 @@ struct hifn_crypto_alg
#define ASYNC_FLAGS_MISALIGNED (1<<0)
-struct ablkcipher_walk
+struct hifn_cipher_walk
{
struct scatterlist cache[ASYNC_SCATTERLIST_CACHE];
u32 flags;
@@ -657,7 +657,7 @@ struct hifn_request_context
u8 *iv;
unsigned int ivsize;
u8 op, type, mode, unused;
- struct ablkcipher_walk walk;
+ struct hifn_cipher_walk walk;
};
#define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg)
@@ -1417,7 +1417,7 @@ static int hifn_setup_dma(struct hifn_device *dev,
return 0;
}
-static int ablkcipher_walk_init(struct ablkcipher_walk *w,
+static int hifn_cipher_walk_init(struct hifn_cipher_walk *w,
int num, gfp_t gfp_flags)
{
int i;
@@ -1442,7 +1442,7 @@ static int ablkcipher_walk_init(struct ablkcipher_walk *w,
return i;
}
-static void ablkcipher_walk_exit(struct ablkcipher_walk *w)
+static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w)
{
int i;
@@ -1486,8 +1486,8 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
return idx;
}
-static int ablkcipher_walk(struct ablkcipher_request *req,
- struct ablkcipher_walk *w)
+static int hifn_cipher_walk(struct ablkcipher_request *req,
+ struct hifn_cipher_walk *w)
{
struct scatterlist *dst, *t;
unsigned int nbytes = req->nbytes, offset, copy, diff;
@@ -1600,12 +1600,12 @@ static int hifn_setup_session(struct ablkcipher_request *req)
}
if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
- err = ablkcipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
+ err = hifn_cipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
if (err < 0)
return err;
}
- sg_num = ablkcipher_walk(req, &rctx->walk);
+ sg_num = hifn_cipher_walk(req, &rctx->walk);
if (sg_num < 0) {
err = sg_num;
goto err_out_exit;
@@ -1806,7 +1806,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
kunmap_atomic(saddr, KM_SOFTIRQ0);
}
- ablkcipher_walk_exit(&rctx->walk);
+ hifn_cipher_walk_exit(&rctx->walk);
}
req->base.complete(&req->base, error);
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 6f29012bcc43..e095422b58dd 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -15,8 +15,14 @@
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
#include "mv_cesa.h"
+
+#define MV_CESA "MV-CESA:"
+#define MAX_HW_HASH_SIZE 0xFFFF
+
/*
* STM:
* /---------------------------------------\
@@ -39,10 +45,12 @@ enum engine_status {
* @dst_sg_it: sg iterator for dst
* @sg_src_left: bytes left in src to process (scatter list)
* @src_start: offset to add to src start position (scatter list)
- * @crypt_len: length of current crypt process
+ * @crypt_len: length of current hw crypt/hash process
+ * @hw_nbytes: total bytes to process in hw for this request
+ * @copy_back: whether to copy data back (crypt) or not (hash)
* @sg_dst_left: bytes left dst to process in this scatter list
* @dst_start: offset to add to dst start position (scatter list)
- * @total_req_bytes: total number of bytes processed (request).
+ * @hw_processed_bytes: number of bytes processed by hw (request).
*
* sg helper are used to iterate over the scatterlist. Since the size of the
* SRAM may be less than the scatter size, this struct is used to keep
@@ -51,15 +59,19 @@ enum engine_status {
struct req_progress {
struct sg_mapping_iter src_sg_it;
struct sg_mapping_iter dst_sg_it;
+ void (*complete) (void);
+ void (*process) (int is_first);
/* src mostly */
int sg_src_left;
int src_start;
int crypt_len;
+ int hw_nbytes;
/* dst mostly */
+ int copy_back;
int sg_dst_left;
int dst_start;
- int total_req_bytes;
+ int hw_processed_bytes;
};
struct crypto_priv {
@@ -72,10 +84,12 @@ struct crypto_priv {
spinlock_t lock;
struct crypto_queue queue;
enum engine_status eng_st;
- struct ablkcipher_request *cur_req;
+ struct crypto_async_request *cur_req;
struct req_progress p;
int max_req_size;
int sram_size;
+ int has_sha1;
+ int has_hmac_sha1;
};
static struct crypto_priv *cpg;
@@ -97,6 +111,31 @@ struct mv_req_ctx {
int decrypt;
};
+enum hash_op {
+ COP_SHA1,
+ COP_HMAC_SHA1
+};
+
+struct mv_tfm_hash_ctx {
+ struct crypto_shash *fallback;
+ struct crypto_shash *base_hash;
+ u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
+ int count_add;
+ enum hash_op op;
+};
+
+struct mv_req_hash_ctx {
+ u64 count;
+ u32 state[SHA1_DIGEST_SIZE / 4];
+ u8 buffer[SHA1_BLOCK_SIZE];
+ int first_hash; /* marks that we don't have previous state */
+ int last_chunk; /* marks that this is the 'final' request */
+ int extra_bytes; /* unprocessed bytes in buffer */
+ enum hash_op op;
+ int count_add;
+ struct scatterlist dummysg;
+};
+
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
struct crypto_aes_ctx gen_aes_key;
@@ -144,32 +183,51 @@ static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static void setup_data_in(struct ablkcipher_request *req)
+static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
int ret;
- void *buf;
+ void *sbuf;
+ int copied = 0;
- if (!cpg->p.sg_src_left) {
- ret = sg_miter_next(&cpg->p.src_sg_it);
- BUG_ON(!ret);
- cpg->p.sg_src_left = cpg->p.src_sg_it.length;
- cpg->p.src_start = 0;
- }
-
- cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
-
- buf = cpg->p.src_sg_it.addr;
- buf += cpg->p.src_start;
+ while (1) {
+ if (!p->sg_src_left) {
+ ret = sg_miter_next(&p->src_sg_it);
+ BUG_ON(!ret);
+ p->sg_src_left = p->src_sg_it.length;
+ p->src_start = 0;
+ }
- memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
+ sbuf = p->src_sg_it.addr + p->src_start;
+
+ if (p->sg_src_left <= len - copied) {
+ memcpy(dbuf + copied, sbuf, p->sg_src_left);
+ copied += p->sg_src_left;
+ p->sg_src_left = 0;
+ if (copied >= len)
+ break;
+ } else {
+ int copy_len = len - copied;
+ memcpy(dbuf + copied, sbuf, copy_len);
+ p->src_start += copy_len;
+ p->sg_src_left -= copy_len;
+ break;
+ }
+ }
+}
- cpg->p.sg_src_left -= cpg->p.crypt_len;
- cpg->p.src_start += cpg->p.crypt_len;
+static void setup_data_in(void)
+{
+ struct req_progress *p = &cpg->p;
+ int data_in_sram =
+ min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
+ copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
+ data_in_sram - p->crypt_len);
+ p->crypt_len = data_in_sram;
}
static void mv_process_current_q(int first_block)
{
- struct ablkcipher_request *req = cpg->cur_req;
+ struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
struct sec_accel_config op;
@@ -179,6 +237,7 @@ static void mv_process_current_q(int first_block)
op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
break;
case COP_AES_CBC:
+ default:
op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
@@ -211,7 +270,7 @@ static void mv_process_current_q(int first_block)
ENC_P_DST(SRAM_DATA_OUT_START);
op.enc_key_p = SRAM_DATA_KEY_P;
- setup_data_in(req);
+ setup_data_in();
op.enc_len = cpg->p.crypt_len;
memcpy(cpg->sram + SRAM_CONFIG, &op,
sizeof(struct sec_accel_config));
@@ -228,91 +287,294 @@ static void mv_process_current_q(int first_block)
static void mv_crypto_algo_completion(void)
{
- struct ablkcipher_request *req = cpg->cur_req;
+ struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ sg_miter_stop(&cpg->p.src_sg_it);
+ sg_miter_stop(&cpg->p.dst_sg_it);
+
if (req_ctx->op != COP_AES_CBC)
return ;
memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}
+static void mv_process_hash_current(int first_block)
+{
+ struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+ struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+ struct req_progress *p = &cpg->p;
+ struct sec_accel_config op = { 0 };
+ int is_last;
+
+ switch (req_ctx->op) {
+ case COP_SHA1:
+ default:
+ op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
+ break;
+ case COP_HMAC_SHA1:
+ op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
+ break;
+ }
+
+ op.mac_src_p =
+ MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32)
+ req_ctx->
+ count);
+
+ setup_data_in();
+
+ op.mac_digest =
+ MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
+ op.mac_iv =
+ MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
+ MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
+
+ is_last = req_ctx->last_chunk
+ && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
+ && (req_ctx->count <= MAX_HW_HASH_SIZE);
+ if (req_ctx->first_hash) {
+ if (is_last)
+ op.config |= CFG_NOT_FRAG;
+ else
+ op.config |= CFG_FIRST_FRAG;
+
+ req_ctx->first_hash = 0;
+ } else {
+ if (is_last)
+ op.config |= CFG_LAST_FRAG;
+ else
+ op.config |= CFG_MID_FRAG;
+ }
+
+ memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
+
+ writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
+ /* GO */
+ writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
+
+ /*
+ * XXX: add timer if the interrupt does not occur for some mystery
+ * reason
+ */
+}
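
The hash engine is fed in fragments: a request that is both the first and the last chunk is marked NOT_FRAG, the edges of a longer stream get FIRST_FRAG/LAST_FRAG, and everything in between is MID_FRAG. A tiny standalone sketch of that selection, reusing the CFG_*_FRAG values added to mv_cesa.h:

#include <stdio.h>

#define CFG_NOT_FRAG   (0u << 30)
#define CFG_FIRST_FRAG (1u << 30)
#define CFG_LAST_FRAG  (2u << 30)
#define CFG_MID_FRAG   (3u << 30)

static unsigned int frag_flags(int first_hash, int is_last)
{
    if (first_hash)
        return is_last ? CFG_NOT_FRAG : CFG_FIRST_FRAG;
    return is_last ? CFG_LAST_FRAG : CFG_MID_FRAG;
}

int main(void)
{
    printf("single chunk:  0x%08x\n", frag_flags(1, 1));
    printf("first of many: 0x%08x\n", frag_flags(1, 0));
    printf("middle:        0x%08x\n", frag_flags(0, 0));
    printf("last:          0x%08x\n", frag_flags(0, 1));
    return 0;
}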
+
+static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
+ struct shash_desc *desc)
+{
+ int i;
+ struct sha1_state shash_state;
+
+ shash_state.count = ctx->count + ctx->count_add;
+ for (i = 0; i < 5; i++)
+ shash_state.state[i] = ctx->state[i];
+ memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
+ return crypto_shash_import(desc, &shash_state);
+}
+
+static int mv_hash_final_fallback(struct ahash_request *req)
+{
+ const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
+ } desc;
+ int rc;
+
+ desc.shash.tfm = tfm_ctx->fallback;
+ desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ if (unlikely(req_ctx->first_hash)) {
+ crypto_shash_init(&desc.shash);
+ crypto_shash_update(&desc.shash, req_ctx->buffer,
+ req_ctx->extra_bytes);
+ } else {
+ /* only SHA1 for now....
+ */
+ rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
+ if (rc)
+ goto out;
+ }
+ rc = crypto_shash_final(&desc.shash, req->result);
+out:
+ return rc;
+}
+
+static void mv_hash_algo_completion(void)
+{
+ struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+ struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+
+ if (ctx->extra_bytes)
+ copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
+ sg_miter_stop(&cpg->p.src_sg_it);
+
+ ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
+ ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
+ ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
+ ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
+ ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
+
+ if (likely(ctx->last_chunk)) {
+ if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
+ memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
+ crypto_ahash_digestsize(crypto_ahash_reqtfm
+ (req)));
+ } else
+ mv_hash_final_fallback(req);
+ }
+}
+
static void dequeue_complete_req(void)
{
- struct ablkcipher_request *req = cpg->cur_req;
+ struct crypto_async_request *req = cpg->cur_req;
void *buf;
int ret;
+ cpg->p.hw_processed_bytes += cpg->p.crypt_len;
+ if (cpg->p.copy_back) {
+ int need_copy_len = cpg->p.crypt_len;
+ int sram_offset = 0;
+ do {
+ int dst_copy;
+
+ if (!cpg->p.sg_dst_left) {
+ ret = sg_miter_next(&cpg->p.dst_sg_it);
+ BUG_ON(!ret);
+ cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
+ cpg->p.dst_start = 0;
+ }
- cpg->p.total_req_bytes += cpg->p.crypt_len;
- do {
- int dst_copy;
-
- if (!cpg->p.sg_dst_left) {
- ret = sg_miter_next(&cpg->p.dst_sg_it);
- BUG_ON(!ret);
- cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
- cpg->p.dst_start = 0;
- }
-
- buf = cpg->p.dst_sg_it.addr;
- buf += cpg->p.dst_start;
+ buf = cpg->p.dst_sg_it.addr;
+ buf += cpg->p.dst_start;
- dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
+ dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
- memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
+ memcpy(buf,
+ cpg->sram + SRAM_DATA_OUT_START + sram_offset,
+ dst_copy);
+ sram_offset += dst_copy;
+ cpg->p.sg_dst_left -= dst_copy;
+ need_copy_len -= dst_copy;
+ cpg->p.dst_start += dst_copy;
+ } while (need_copy_len > 0);
+ }
- cpg->p.sg_dst_left -= dst_copy;
- cpg->p.crypt_len -= dst_copy;
- cpg->p.dst_start += dst_copy;
- } while (cpg->p.crypt_len > 0);
+ cpg->p.crypt_len = 0;
BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
- if (cpg->p.total_req_bytes < req->nbytes) {
+ if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
/* process next scatter list entry */
cpg->eng_st = ENGINE_BUSY;
- mv_process_current_q(0);
+ cpg->p.process(0);
} else {
- sg_miter_stop(&cpg->p.src_sg_it);
- sg_miter_stop(&cpg->p.dst_sg_it);
- mv_crypto_algo_completion();
+ cpg->p.complete();
cpg->eng_st = ENGINE_IDLE;
- req->base.complete(&req->base, 0);
+ local_bh_disable();
+ req->complete(req, 0);
+ local_bh_enable();
}
}
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
int i = 0;
-
- do {
- total_bytes -= sl[i].length;
- i++;
-
- } while (total_bytes > 0);
+ size_t cur_len;
+
+ while (1) {
+ cur_len = sl[i].length;
+ ++i;
+ if (total_bytes > cur_len)
+ total_bytes -= cur_len;
+ else
+ break;
+ }
return i;
}
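
One reason for rewriting count_sgs() this way: with an unsigned byte count, subtracting a scatterlist entry that is larger than what is left wraps around instead of going negative, so the old do/while could walk past the end of the list. A standalone demonstration with made-up lengths:

#include <stdio.h>

static int count_old(const unsigned int *len, unsigned int total)
{
    int i = 0;

    do {
        total -= len[i];
        i++;
    } while (total > 0);                /* wraps if an entry over-consumes */
    return i;
}

static int count_new(const unsigned int *len, unsigned int total)
{
    int i = 0;

    while (1) {
        unsigned int cur = len[i];

        ++i;
        if (total > cur)
            total -= cur;
        else
            break;                      /* this entry covers the remainder */
    }
    return i;
}

int main(void)
{
    /* four entries of 64 bytes each */
    unsigned int len[4] = { 64, 64, 64, 64 };

    printf("old (exact fit): %d entries\n", count_old(len, 256));
    printf("new (partial):   %d entries\n", count_new(len, 100));
    /* count_old(len, 100) would wrap: 100-64=36, then 36-64 underflows */
    return 0;
}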
-static void mv_enqueue_new_req(struct ablkcipher_request *req)
+static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
+ struct req_progress *p = &cpg->p;
int num_sgs;
- cpg->cur_req = req;
- memset(&cpg->p, 0, sizeof(struct req_progress));
+ cpg->cur_req = &req->base;
+ memset(p, 0, sizeof(struct req_progress));
+ p->hw_nbytes = req->nbytes;
+ p->complete = mv_crypto_algo_completion;
+ p->process = mv_process_current_q;
+ p->copy_back = 1;
num_sgs = count_sgs(req->src, req->nbytes);
- sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+ sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
num_sgs = count_sgs(req->dst, req->nbytes);
- sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+ sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+
mv_process_current_q(1);
}
+static void mv_start_new_hash_req(struct ahash_request *req)
+{
+ struct req_progress *p = &cpg->p;
+ struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+ const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+ int num_sgs, hw_bytes, old_extra_bytes, rc;
+ cpg->cur_req = &req->base;
+ memset(p, 0, sizeof(struct req_progress));
+ hw_bytes = req->nbytes + ctx->extra_bytes;
+ old_extra_bytes = ctx->extra_bytes;
+
+ if (unlikely(ctx->extra_bytes)) {
+ memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
+ ctx->extra_bytes);
+ p->crypt_len = ctx->extra_bytes;
+ }
+
+ memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
+
+ if (unlikely(!ctx->first_hash)) {
+ writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+ writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+ writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+ writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+ writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+ }
+
+ ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
+ if (ctx->extra_bytes != 0
+ && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
+ hw_bytes -= ctx->extra_bytes;
+ else
+ ctx->extra_bytes = 0;
+
+ num_sgs = count_sgs(req->src, req->nbytes);
+ sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+
+ if (hw_bytes) {
+ p->hw_nbytes = hw_bytes;
+ p->complete = mv_hash_algo_completion;
+ p->process = mv_process_hash_current;
+
+ mv_process_hash_current(1);
+ } else {
+ copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
+ ctx->extra_bytes - old_extra_bytes);
+ sg_miter_stop(&p->src_sg_it);
+ if (ctx->last_chunk)
+ rc = mv_hash_final_fallback(req);
+ else
+ rc = 0;
+ cpg->eng_st = ENGINE_IDLE;
+ local_bh_disable();
+ req->base.complete(&req->base, rc);
+ local_bh_enable();
+ }
+}
+
static int queue_manag(void *data)
{
cpg->eng_st = ENGINE_IDLE;
do {
- struct ablkcipher_request *req;
struct crypto_async_request *async_req = NULL;
struct crypto_async_request *backlog;
@@ -338,9 +600,18 @@ static int queue_manag(void *data)
}
if (async_req) {
- req = container_of(async_req,
- struct ablkcipher_request, base);
- mv_enqueue_new_req(req);
+ if (async_req->tfm->__crt_alg->cra_type !=
+ &crypto_ahash_type) {
+ struct ablkcipher_request *req =
+ container_of(async_req,
+ struct ablkcipher_request,
+ base);
+ mv_start_new_crypt_req(req);
+ } else {
+ struct ahash_request *req =
+ ahash_request_cast(async_req);
+ mv_start_new_hash_req(req);
+ }
async_req = NULL;
}
@@ -350,13 +621,13 @@ static int queue_manag(void *data)
return 0;
}
-static int mv_handle_req(struct ablkcipher_request *req)
+static int mv_handle_req(struct crypto_async_request *req)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&cpg->lock, flags);
- ret = ablkcipher_enqueue_request(&cpg->queue, req);
+ ret = crypto_enqueue_request(&cpg->queue, req);
spin_unlock_irqrestore(&cpg->lock, flags);
wake_up_process(cpg->queue_th);
return ret;
@@ -369,7 +640,7 @@ static int mv_enc_aes_ecb(struct ablkcipher_request *req)
req_ctx->op = COP_AES_ECB;
req_ctx->decrypt = 0;
- return mv_handle_req(req);
+ return mv_handle_req(&req->base);
}
static int mv_dec_aes_ecb(struct ablkcipher_request *req)
@@ -381,7 +652,7 @@ static int mv_dec_aes_ecb(struct ablkcipher_request *req)
req_ctx->decrypt = 1;
compute_aes_dec_key(ctx);
- return mv_handle_req(req);
+ return mv_handle_req(&req->base);
}
static int mv_enc_aes_cbc(struct ablkcipher_request *req)
@@ -391,7 +662,7 @@ static int mv_enc_aes_cbc(struct ablkcipher_request *req)
req_ctx->op = COP_AES_CBC;
req_ctx->decrypt = 0;
- return mv_handle_req(req);
+ return mv_handle_req(&req->base);
}
static int mv_dec_aes_cbc(struct ablkcipher_request *req)
@@ -403,7 +674,7 @@ static int mv_dec_aes_cbc(struct ablkcipher_request *req)
req_ctx->decrypt = 1;
compute_aes_dec_key(ctx);
- return mv_handle_req(req);
+ return mv_handle_req(&req->base);
}
static int mv_cra_init(struct crypto_tfm *tfm)
@@ -412,6 +683,215 @@ static int mv_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
+ int is_last, unsigned int req_len,
+ int count_add)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->op = op;
+ ctx->count = req_len;
+ ctx->first_hash = 1;
+ ctx->last_chunk = is_last;
+ ctx->count_add = count_add;
+}
+
+static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
+ unsigned req_len)
+{
+ ctx->last_chunk = is_last;
+ ctx->count += req_len;
+}
+
+static int mv_hash_init(struct ahash_request *req)
+{
+ const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+ mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
+ tfm_ctx->count_add);
+ return 0;
+}
+
+static int mv_hash_update(struct ahash_request *req)
+{
+ if (!req->nbytes)
+ return 0;
+
+ mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
+ return mv_handle_req(&req->base);
+}
+
+static int mv_hash_final(struct ahash_request *req)
+{
+ struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+ /* dummy buffer of 4 bytes */
+ sg_init_one(&ctx->dummysg, ctx->buffer, 4);
+ /* I think I'm allowed to do that... */
+ ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
+ mv_update_hash_req_ctx(ctx, 1, 0);
+ return mv_handle_req(&req->base);
+}
+
+static int mv_hash_finup(struct ahash_request *req)
+{
+ if (!req->nbytes)
+ return mv_hash_final(req);
+
+ mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
+ return mv_handle_req(&req->base);
+}
+
+static int mv_hash_digest(struct ahash_request *req)
+{
+ const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+ mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
+ req->nbytes, tfm_ctx->count_add);
+ return mv_handle_req(&req->base);
+}
+
+static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
+ const void *ostate)
+{
+ const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
+ int i;
+ for (i = 0; i < 5; i++) {
+ ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
+ ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
+ }
+}
+
+static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
+ unsigned int keylen)
+{
+ int rc;
+ struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+ int bs, ds, ss;
+
+ if (!ctx->base_hash)
+ return 0;
+
+ rc = crypto_shash_setkey(ctx->fallback, key, keylen);
+ if (rc)
+ return rc;
+
+ /* Can't see a way to extract the ipad/opad from the fallback tfm
+ so I'm basically copying code from the hmac module */
+ bs = crypto_shash_blocksize(ctx->base_hash);
+ ds = crypto_shash_digestsize(ctx->base_hash);
+ ss = crypto_shash_statesize(ctx->base_hash);
+
+ {
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(ctx->base_hash)];
+ } desc;
+ unsigned int i;
+ char ipad[ss];
+ char opad[ss];
+
+ desc.shash.tfm = ctx->base_hash;
+ desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ if (keylen > bs) {
+ int err;
+
+ err =
+ crypto_shash_digest(&desc.shash, key, keylen, ipad);
+ if (err)
+ return err;
+
+ keylen = ds;
+ } else
+ memcpy(ipad, key, keylen);
+
+ memset(ipad + keylen, 0, bs - keylen);
+ memcpy(opad, ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ ipad[i] ^= 0x36;
+ opad[i] ^= 0x5c;
+ }
+
+ rc = crypto_shash_init(&desc.shash) ? :
+ crypto_shash_update(&desc.shash, ipad, bs) ? :
+ crypto_shash_export(&desc.shash, ipad) ? :
+ crypto_shash_init(&desc.shash) ? :
+ crypto_shash_update(&desc.shash, opad, bs) ? :
+ crypto_shash_export(&desc.shash, opad);
+
+ if (rc == 0)
+ mv_hash_init_ivs(ctx, ipad, opad);
+
+ return rc;
+ }
+}
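
mv_hash_setkey() derives the HMAC inner and outer pads the same way the generic hmac module does: a key longer than the block size is first hashed down (not shown below), the key is zero-padded to the block size, and the two pads are that padded key XORed with 0x36 and 0x5c; the partial hash state after absorbing each pad is then exported and used as the hardware IVs. A standalone sketch of just the pad derivation, with a made-up key and a SHA-1 block size:

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64                   /* SHA-1 block size */

/* keylen must already be <= BLOCK_SIZE; over-long keys are hashed first */
static void derive_pads(const unsigned char *key, size_t keylen,
                        unsigned char ipad[BLOCK_SIZE],
                        unsigned char opad[BLOCK_SIZE])
{
    size_t i;

    memset(ipad, 0, BLOCK_SIZE);
    memcpy(ipad, key, keylen);
    memcpy(opad, ipad, BLOCK_SIZE);

    for (i = 0; i < BLOCK_SIZE; i++) {
        ipad[i] ^= 0x36;
        opad[i] ^= 0x5c;
    }
}

int main(void)
{
    unsigned char ipad[BLOCK_SIZE], opad[BLOCK_SIZE];

    derive_pads((const unsigned char *)"secret", 6, ipad, opad);
    printf("ipad[0]=0x%02x opad[0]=0x%02x\n", ipad[0], opad[0]);
    return 0;
}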
+
+static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
+ enum hash_op op, int count_add)
+{
+ const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+ struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_shash *fallback_tfm = NULL;
+ struct crypto_shash *base_hash = NULL;
+ int err = -ENOMEM;
+
+ ctx->op = op;
+ ctx->count_add = count_add;
+
+ /* Allocate a fallback and abort if it failed. */
+ fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback_tfm)) {
+ printk(KERN_WARNING MV_CESA
+ "Fallback driver '%s' could not be loaded!\n",
+ fallback_driver_name);
+ err = PTR_ERR(fallback_tfm);
+ goto out;
+ }
+ ctx->fallback = fallback_tfm;
+
+ if (base_hash_name) {
+ /* Allocate a hash to compute the ipad/opad of hmac. */
+ base_hash = crypto_alloc_shash(base_hash_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(base_hash)) {
+ printk(KERN_WARNING MV_CESA
+ "Base driver '%s' could not be loaded!\n",
+ base_hash_name);
+ err = PTR_ERR(base_hash);
+ goto err_bad_base;
+ }
+ }
+ ctx->base_hash = base_hash;
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct mv_req_hash_ctx) +
+ crypto_shash_descsize(ctx->fallback));
+ return 0;
+err_bad_base:
+ crypto_free_shash(fallback_tfm);
+out:
+ return err;
+}
+
+static void mv_cra_hash_exit(struct crypto_tfm *tfm)
+{
+ struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_shash(ctx->fallback);
+ if (ctx->base_hash)
+ crypto_free_shash(ctx->base_hash);
+}
+
+static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
+{
+ return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
+}
+
+static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
+{
+ return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
+}
+
irqreturn_t crypto_int(int irq, void *priv)
{
u32 val;
@@ -474,6 +954,53 @@ struct crypto_alg mv_aes_alg_cbc = {
},
};
+struct ahash_alg mv_sha1_alg = {
+ .init = mv_hash_init,
+ .update = mv_hash_update,
+ .final = mv_hash_final,
+ .finup = mv_hash_finup,
+ .digest = mv_hash_digest,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "mv-sha1",
+ .cra_priority = 300,
+ .cra_flags =
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+ .cra_init = mv_cra_hash_sha1_init,
+ .cra_exit = mv_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
+struct ahash_alg mv_hmac_sha1_alg = {
+ .init = mv_hash_init,
+ .update = mv_hash_update,
+ .final = mv_hash_final,
+ .finup = mv_hash_finup,
+ .digest = mv_hash_digest,
+ .setkey = mv_hash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "mv-hmac-sha1",
+ .cra_priority = 300,
+ .cra_flags =
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+ .cra_init = mv_cra_hash_hmac_sha1_init,
+ .cra_exit = mv_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
static int mv_probe(struct platform_device *pdev)
{
struct crypto_priv *cp;
@@ -482,7 +1009,7 @@ static int mv_probe(struct platform_device *pdev)
int ret;
if (cpg) {
- printk(KERN_ERR "Second crypto dev?\n");
+ printk(KERN_ERR MV_CESA "Second crypto dev?\n");
return -EEXIST;
}
@@ -496,7 +1023,7 @@ static int mv_probe(struct platform_device *pdev)
spin_lock_init(&cp->lock);
crypto_init_queue(&cp->queue, 50);
- cp->reg = ioremap(res->start, res->end - res->start + 1);
+ cp->reg = ioremap(res->start, resource_size(res));
if (!cp->reg) {
ret = -ENOMEM;
goto err;
@@ -507,7 +1034,7 @@ static int mv_probe(struct platform_device *pdev)
ret = -ENXIO;
goto err_unmap_reg;
}
- cp->sram_size = res->end - res->start + 1;
+ cp->sram_size = resource_size(res);
cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
cp->sram = ioremap(res->start, cp->sram_size);
if (!cp->sram) {
@@ -546,6 +1073,21 @@ static int mv_probe(struct platform_device *pdev)
ret = crypto_register_alg(&mv_aes_alg_cbc);
if (ret)
goto err_unreg_ecb;
+
+ ret = crypto_register_ahash(&mv_sha1_alg);
+ if (ret == 0)
+ cpg->has_sha1 = 1;
+ else
+ printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");
+
+ ret = crypto_register_ahash(&mv_hmac_sha1_alg);
+ if (ret == 0) {
+ cpg->has_hmac_sha1 = 1;
+ } else {
+ printk(KERN_WARNING MV_CESA
+ "Could not register hmac-sha1 driver\n");
+ }
+
return 0;
err_unreg_ecb:
crypto_unregister_alg(&mv_aes_alg_ecb);
@@ -570,6 +1112,10 @@ static int mv_remove(struct platform_device *pdev)
crypto_unregister_alg(&mv_aes_alg_ecb);
crypto_unregister_alg(&mv_aes_alg_cbc);
+ if (cp->has_sha1)
+ crypto_unregister_ahash(&mv_sha1_alg);
+ if (cp->has_hmac_sha1)
+ crypto_unregister_ahash(&mv_hmac_sha1_alg);
kthread_stop(cp->queue_th);
free_irq(cp->irq, cp);
memset(cp->sram, 0, cp->sram_size);
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
index c3e25d3bb171..08fcb1116d90 100644
--- a/drivers/crypto/mv_cesa.h
+++ b/drivers/crypto/mv_cesa.h
@@ -1,6 +1,10 @@
#ifndef __MV_CRYPTO_H__
#define DIGEST_INITIAL_VAL_A 0xdd00
+#define DIGEST_INITIAL_VAL_B 0xdd04
+#define DIGEST_INITIAL_VAL_C 0xdd08
+#define DIGEST_INITIAL_VAL_D 0xdd0c
+#define DIGEST_INITIAL_VAL_E 0xdd10
#define DES_CMD_REG 0xdd58
#define SEC_ACCEL_CMD 0xde00
@@ -70,6 +74,10 @@ struct sec_accel_config {
#define CFG_AES_LEN_128 (0 << 24)
#define CFG_AES_LEN_192 (1 << 24)
#define CFG_AES_LEN_256 (2 << 24)
+#define CFG_NOT_FRAG (0 << 30)
+#define CFG_FIRST_FRAG (1 << 30)
+#define CFG_LAST_FRAG (2 << 30)
+#define CFG_MID_FRAG (3 << 30)
u32 enc_p;
#define ENC_P_SRC(x) (x)
@@ -90,7 +98,11 @@ struct sec_accel_config {
#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
u32 mac_digest;
+#define MAC_DIGEST_P(x) (x)
+#define MAC_FRAG_LEN(x) ((x) << 16)
u32 mac_iv;
+#define MAC_INNER_IV_P(x) (x)
+#define MAC_OUTER_IV_P(x) ((x) << 16)
}__attribute__ ((packed));
/*
* /-----------\ 0
@@ -101,19 +113,37 @@ struct sec_accel_config {
* | IV IN | 4 * 4
* |-----------| 0x40 (inplace)
* | IV BUF | 4 * 4
- * |-----------| 0x50
+ * |-----------| 0x80
* | DATA IN | 16 * x (max ->max_req_size)
- * |-----------| 0x50 (inplace operation)
+ * |-----------| 0x80 (inplace operation)
* | DATA OUT | 16 * x (max ->max_req_size)
* \-----------/ SRAM size
*/
+
+ /* Hashing memory map:
+ * /-----------\ 0
+ * | ACCEL CFG | 4 * 8
+ * |-----------| 0x20
+ * | Inner IV | 5 * 4
+ * |-----------| 0x34
+ * | Outer IV | 5 * 4
+ * |-----------| 0x48
+ * | Output BUF| 5 * 4
+ * |-----------| 0x80
+ * | DATA IN | 64 * x (max ->max_req_size)
+ * \-----------/ SRAM size
+ */
#define SRAM_CONFIG 0x00
#define SRAM_DATA_KEY_P 0x20
#define SRAM_DATA_IV 0x40
#define SRAM_DATA_IV_BUF 0x40
-#define SRAM_DATA_IN_START 0x50
-#define SRAM_DATA_OUT_START 0x50
+#define SRAM_DATA_IN_START 0x80
+#define SRAM_DATA_OUT_START 0x80
+
+#define SRAM_HMAC_IV_IN 0x20
+#define SRAM_HMAC_IV_OUT 0x34
+#define SRAM_DIGEST_BUF 0x48
-#define SRAM_CFG_SPACE 0x50
+#define SRAM_CFG_SPACE 0x80
#endif
diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S
new file mode 100644
index 000000000000..f7c793745a1e
--- /dev/null
+++ b/drivers/crypto/n2_asm.S
@@ -0,0 +1,95 @@
+/* n2_asm.S: Hypervisor calls for NCS support.
+ *
+ * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/linkage.h>
+#include <asm/hypervisor.h>
+#include "n2_core.h"
+
+ /* o0: queue type
+ * o1: RA of queue
+ * o2: num entries in queue
+ * o3: address of queue handle return
+ */
+ENTRY(sun4v_ncs_qconf)
+ mov HV_FAST_NCS_QCONF, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o3]
+ retl
+ nop
+ENDPROC(sun4v_ncs_qconf)
+
+ /* %o0: queue handle
+ * %o1: address of queue type return
+ * %o2: address of queue base address return
+ * %o3: address of queue num entries return
+ */
+ENTRY(sun4v_ncs_qinfo)
+ mov %o1, %g1
+ mov %o2, %g2
+ mov %o3, %g3
+ mov HV_FAST_NCS_QINFO, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%g1]
+ stx %o2, [%g2]
+ stx %o3, [%g3]
+ retl
+ nop
+ENDPROC(sun4v_ncs_qinfo)
+
+ /* %o0: queue handle
+ * %o1: address of head offset return
+ */
+ENTRY(sun4v_ncs_gethead)
+ mov %o1, %o2
+ mov HV_FAST_NCS_GETHEAD, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o2]
+ retl
+ nop
+ENDPROC(sun4v_ncs_gethead)
+
+ /* %o0: queue handle
+ * %o1: address of tail offset return
+ */
+ENTRY(sun4v_ncs_gettail)
+ mov %o1, %o2
+ mov HV_FAST_NCS_GETTAIL, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o2]
+ retl
+ nop
+ENDPROC(sun4v_ncs_gettail)
+
+ /* %o0: queue handle
+ * %o1: new tail offset
+ */
+ENTRY(sun4v_ncs_settail)
+ mov HV_FAST_NCS_SETTAIL, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_ncs_settail)
+
+ /* %o0: queue handle
+ * %o1: address of devino return
+ */
+ENTRY(sun4v_ncs_qhandle_to_devino)
+ mov %o1, %o2
+ mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o2]
+ retl
+ nop
+ENDPROC(sun4v_ncs_qhandle_to_devino)
+
+ /* %o0: queue handle
+ * %o1: new head offset
+ */
+ENTRY(sun4v_ncs_sethead_marker)
+ mov HV_FAST_NCS_SETHEAD_MARKER, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_ncs_sethead_marker)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
new file mode 100644
index 000000000000..23163fda5035
--- /dev/null
+++ b/drivers/crypto/n2_core.c
@@ -0,0 +1,2090 @@
+/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
+ *
+ * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/cpumask.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/crypto.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+
+#include <asm/hypervisor.h>
+#include <asm/mdesc.h>
+
+#include "n2_core.h"
+
+#define DRV_MODULE_NAME "n2_crypto"
+#define DRV_MODULE_VERSION "0.1"
+#define DRV_MODULE_RELDATE "April 29, 2010"
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("Niagara2 Crypto driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define N2_CRA_PRIORITY 300
+
+static DEFINE_MUTEX(spu_lock);
+
+struct spu_queue {
+ cpumask_t sharing;
+ unsigned long qhandle;
+
+ spinlock_t lock;
+ u8 q_type;
+ void *q;
+ unsigned long head;
+ unsigned long tail;
+ struct list_head jobs;
+
+ unsigned long devino;
+
+ char irq_name[32];
+ unsigned int irq;
+
+ struct list_head list;
+};
+
+static struct spu_queue **cpu_to_cwq;
+static struct spu_queue **cpu_to_mau;
+
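+/* Advance a queue offset by one entry, wrapping around at the end of the
+ * ring for the given queue type.
+ */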
+static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
+{
+ if (q->q_type == HV_NCS_QTYPE_MAU) {
+ off += MAU_ENTRY_SIZE;
+ if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
+ off = 0;
+ } else {
+ off += CWQ_ENTRY_SIZE;
+ if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
+ off = 0;
+ }
+ return off;
+}
+
+struct n2_request_common {
+ struct list_head entry;
+ unsigned int offset;
+};
+#define OFFSET_NOT_RUNNING (~(unsigned int)0)
+
+/* An async job request records the final tail value it used in
+ * n2_request_common->offset; test whether that offset lies in
+ * the half-open range (old_head, new_head], i.e. whether the job finished.
+ */
+static inline bool job_finished(struct spu_queue *q, unsigned int offset,
+ unsigned long old_head, unsigned long new_head)
+{
+ if (old_head <= new_head) {
+ if (offset > old_head && offset <= new_head)
+ return true;
+ } else {
+ if (offset > old_head || offset <= new_head)
+ return true;
+ }
+ return false;
+}
+
+/* When the HEAD marker is unequal to the actual HEAD, we get
+ * a virtual device INO interrupt. We should process the
+ * completed CWQ entries and adjust the HEAD marker to clear
+ * the IRQ.
+ */
+static irqreturn_t cwq_intr(int irq, void *dev_id)
+{
+ unsigned long off, new_head, hv_ret;
+ struct spu_queue *q = dev_id;
+
+ pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
+ smp_processor_id(), q->qhandle);
+
+ spin_lock(&q->lock);
+
+ hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
+
+ pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
+ smp_processor_id(), new_head, hv_ret);
+
+ for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
+ /* XXX ... XXX */
+ }
+
+ hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
+ if (hv_ret == HV_EOK)
+ q->head = new_head;
+
+ spin_unlock(&q->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mau_intr(int irq, void *dev_id)
+{
+ struct spu_queue *q = dev_id;
+ unsigned long head, hv_ret;
+
+ spin_lock(&q->lock);
+
+ pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
+ smp_processor_id(), q->qhandle);
+
+ hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
+
+ pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
+ smp_processor_id(), head, hv_ret);
+
+ sun4v_ncs_sethead_marker(q->qhandle, head);
+
+ spin_unlock(&q->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void *spu_queue_next(struct spu_queue *q, void *cur)
+{
+ return q->q + spu_next_offset(q, cur - q->q);
+}
+
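+/* Number of free entries in the ring; one slot is always kept unused so
+ * that head == tail unambiguously means "empty".
+ */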
+static int spu_queue_num_free(struct spu_queue *q)
+{
+ unsigned long head = q->head;
+ unsigned long tail = q->tail;
+ unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
+ unsigned long diff;
+
+ if (head > tail)
+ diff = head - tail;
+ else
+ diff = (end - tail) + head;
+
+ return (diff / CWQ_ENTRY_SIZE) - 1;
+}
+
+static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
+{
+ int avail = spu_queue_num_free(q);
+
+ if (avail >= num_entries)
+ return q->q + q->tail;
+
+ return NULL;
+}
+
+static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
+{
+ unsigned long hv_ret, new_tail;
+
+ new_tail = spu_next_offset(q, last - q->q);
+
+ hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
+ if (hv_ret == HV_EOK)
+ q->tail = new_tail;
+ return hv_ret;
+}
+
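+/* Build the 64-bit control word that heads the initial CWQ entry of an
+ * operation.
+ */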
+static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
+ int enc_type, int auth_type,
+ unsigned int hash_len,
+ bool sfas, bool sob, bool eob, bool encrypt,
+ int opcode)
+{
+ u64 word = (len - 1) & CONTROL_LEN;
+
+ word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
+ word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
+ word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
+ if (sfas)
+ word |= CONTROL_STORE_FINAL_AUTH_STATE;
+ if (sob)
+ word |= CONTROL_START_OF_BLOCK;
+ if (eob)
+ word |= CONTROL_END_OF_BLOCK;
+ if (encrypt)
+ word |= CONTROL_ENCRYPT;
+ if (hmac_key_len)
+ word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
+ if (hash_len)
+ word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
+
+ return word;
+}
+
+#if 0
+static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
+{
+ if (this_len >= 64 ||
+ qp->head != qp->tail)
+ return true;
+ return false;
+}
+#endif
+
+struct n2_base_ctx {
+ struct list_head list;
+};
+
+static void n2_base_ctx_init(struct n2_base_ctx *ctx)
+{
+ INIT_LIST_HEAD(&ctx->list);
+}
+
+struct n2_hash_ctx {
+ struct n2_base_ctx base;
+
+ struct crypto_ahash *fallback_tfm;
+};
+
+struct n2_hash_req_ctx {
+ union {
+ struct md5_state md5;
+ struct sha1_state sha1;
+ struct sha256_state sha256;
+ } u;
+
+ unsigned char hash_key[64];
+ unsigned char keyed_zero_hash[32];
+
+ struct ahash_request fallback_req;
+};
+
+static int n2_hash_async_init(struct ahash_request *req)
+{
+ struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_init(&rctx->fallback_req);
+}
+
+static int n2_hash_async_update(struct ahash_request *req)
+{
+ struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+
+ return crypto_ahash_update(&rctx->fallback_req);
+}
+
+static int n2_hash_async_final(struct ahash_request *req)
+{
+ struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_final(&rctx->fallback_req);
+}
+
+static int n2_hash_async_finup(struct ahash_request *req)
+{
+ struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int n2_hash_cra_init(struct crypto_tfm *tfm)
+{
+ const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct crypto_ahash *fallback_tfm;
+ int err;
+
+ fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback_tfm)) {
+ pr_warning("Fallback driver '%s' could not be loaded!\n",
+ fallback_driver_name);
+ err = PTR_ERR(fallback_tfm);
+ goto out;
+ }
+
+ crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
+ crypto_ahash_reqsize(fallback_tfm)));
+
+ ctx->fallback_tfm = fallback_tfm;
+ return 0;
+
+out:
+ return err;
+}
+
+static void n2_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+
+ crypto_free_ahash(ctx->fallback_tfm);
+}
+
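+/* Busy-wait until the hardware HEAD pointer catches up with the TAIL that
+ * was last submitted to this queue.
+ */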
+static unsigned long wait_for_tail(struct spu_queue *qp)
+{
+ unsigned long head, hv_ret;
+
+ do {
+ hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
+ if (hv_ret != HV_EOK) {
+ pr_err("Hypervisor error on gethead\n");
+ break;
+ }
+ if (head == qp->tail) {
+ qp->head = head;
+ break;
+ }
+ } while (1);
+ return hv_ret;
+}
+
+static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
+ struct cwq_initial_entry *ent)
+{
+ unsigned long hv_ret = spu_queue_submit(qp, ent);
+
+ if (hv_ret == HV_EOK)
+ hv_ret = wait_for_tail(qp);
+
+ return hv_ret;
+}
+
+static int n2_hash_async_digest(struct ahash_request *req,
+ unsigned int auth_type, unsigned int digest_size,
+ unsigned int result_size, void *hash_loc)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cwq_initial_entry *ent;
+ struct crypto_hash_walk walk;
+ struct spu_queue *qp;
+ unsigned long flags;
+ int err = -ENODEV;
+ int nbytes, cpu;
+
+ /* The total effective length of the operation may not
+ * exceed 2^16.
+ */
+ if (unlikely(req->nbytes > (1 << 16))) {
+ struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags =
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_digest(&rctx->fallback_req);
+ }
+
+ n2_base_ctx_init(&ctx->base);
+
+ nbytes = crypto_hash_walk_first(req, &walk);
+
+ cpu = get_cpu();
+ qp = cpu_to_cwq[cpu];
+ if (!qp)
+ goto out;
+
+ spin_lock_irqsave(&qp->lock, flags);
+
+ /* XXX can do better, improve this later by doing a by-hand scatterlist
+ * XXX walk, etc.
+ */
+ ent = qp->q + qp->tail;
+
+ ent->control = control_word_base(nbytes, 0, 0,
+ auth_type, digest_size,
+ false, true, false, false,
+ OPCODE_INPLACE_BIT |
+ OPCODE_AUTH_MAC);
+ ent->src_addr = __pa(walk.data);
+ ent->auth_key_addr = 0UL;
+ ent->auth_iv_addr = __pa(hash_loc);
+ ent->final_auth_state_addr = 0UL;
+ ent->enc_key_addr = 0UL;
+ ent->enc_iv_addr = 0UL;
+ ent->dest_addr = __pa(hash_loc);
+
+ nbytes = crypto_hash_walk_done(&walk, 0);
+ while (nbytes > 0) {
+ ent = spu_queue_next(qp, ent);
+
+ ent->control = (nbytes - 1);
+ ent->src_addr = __pa(walk.data);
+ ent->auth_key_addr = 0UL;
+ ent->auth_iv_addr = 0UL;
+ ent->final_auth_state_addr = 0UL;
+ ent->enc_key_addr = 0UL;
+ ent->enc_iv_addr = 0UL;
+ ent->dest_addr = 0UL;
+
+ nbytes = crypto_hash_walk_done(&walk, 0);
+ }
+ ent->control |= CONTROL_END_OF_BLOCK;
+
+ if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
+ err = -EINVAL;
+ else
+ err = 0;
+
+ spin_unlock_irqrestore(&qp->lock, flags);
+
+ if (!err)
+ memcpy(req->result, hash_loc, result_size);
+out:
+ put_cpu();
+
+ return err;
+}
+
+static int n2_md5_async_digest(struct ahash_request *req)
+{
+ struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+ struct md5_state *m = &rctx->u.md5;
+
+ if (unlikely(req->nbytes == 0)) {
+ static const char md5_zero[MD5_DIGEST_SIZE] = {
+ 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+ 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
+ };
+
+ memcpy(req->result, md5_zero, MD5_DIGEST_SIZE);
+ return 0;
+ }
+ m->hash[0] = cpu_to_le32(0x67452301);
+ m->hash[1] = cpu_to_le32(0xefcdab89);
+ m->hash[2] = cpu_to_le32(0x98badcfe);
+ m->hash[3] = cpu_to_le32(0x10325476);
+
+ return n2_hash_async_digest(req, AUTH_TYPE_MD5,
+ MD5_DIGEST_SIZE, MD5_DIGEST_SIZE,
+ m->hash);
+}
+
+static int n2_sha1_async_digest(struct ahash_request *req)
+{
+ struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+ struct sha1_state *s = &rctx->u.sha1;
+
+ if (unlikely(req->nbytes == 0)) {
+ static const char sha1_zero[SHA1_DIGEST_SIZE] = {
+ 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
+ 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
+ 0x07, 0x09
+ };
+
+ memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE);
+ return 0;
+ }
+ s->state[0] = SHA1_H0;
+ s->state[1] = SHA1_H1;
+ s->state[2] = SHA1_H2;
+ s->state[3] = SHA1_H3;
+ s->state[4] = SHA1_H4;
+
+ return n2_hash_async_digest(req, AUTH_TYPE_SHA1,
+ SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE,
+ s->state);
+}
+
+static int n2_sha256_async_digest(struct ahash_request *req)
+{
+ struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+ struct sha256_state *s = &rctx->u.sha256;
+
+ if (req->nbytes == 0) {
+ static const char sha256_zero[SHA256_DIGEST_SIZE] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
+ 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
+ 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
+ 0x1b, 0x78, 0x52, 0xb8, 0x55
+ };
+
+ memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE);
+ return 0;
+ }
+ s->state[0] = SHA256_H0;
+ s->state[1] = SHA256_H1;
+ s->state[2] = SHA256_H2;
+ s->state[3] = SHA256_H3;
+ s->state[4] = SHA256_H4;
+ s->state[5] = SHA256_H5;
+ s->state[6] = SHA256_H6;
+ s->state[7] = SHA256_H7;
+
+ return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
+ SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE,
+ s->state);
+}
+
+static int n2_sha224_async_digest(struct ahash_request *req)
+{
+ struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+ struct sha256_state *s = &rctx->u.sha256;
+
+ if (req->nbytes == 0) {
+ static const char sha224_zero[SHA224_DIGEST_SIZE] = {
+ 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+ 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+ 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+ 0x2f
+ };
+
+ memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE);
+ return 0;
+ }
+ s->state[0] = SHA224_H0;
+ s->state[1] = SHA224_H1;
+ s->state[2] = SHA224_H2;
+ s->state[3] = SHA224_H3;
+ s->state[4] = SHA224_H4;
+ s->state[5] = SHA224_H5;
+ s->state[6] = SHA224_H6;
+ s->state[7] = SHA224_H7;
+
+ return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
+ SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE,
+ s->state);
+}
+
+struct n2_cipher_context {
+ int key_len;
+ int enc_type;
+ union {
+ u8 aes[AES_MAX_KEY_SIZE];
+ u8 des[DES_KEY_SIZE];
+ u8 des3[3 * DES_KEY_SIZE];
+ u8 arc4[258]; /* S-box, X, Y */
+ } key;
+};
+
+#define N2_CHUNK_ARR_LEN 16
+
+struct n2_crypto_chunk {
+ struct list_head entry;
+ unsigned long iv_paddr : 44;
+ unsigned long arr_len : 20;
+ unsigned long dest_paddr;
+ unsigned long dest_final;
+ struct {
+ unsigned long src_paddr : 44;
+ unsigned long src_len : 20;
+ } arr[N2_CHUNK_ARR_LEN];
+};
+
+struct n2_request_context {
+ struct ablkcipher_walk walk;
+ struct list_head chunk_list;
+ struct n2_crypto_chunk chunk;
+ u8 temp_iv[16];
+};
+
+/* The SPU allows some level of flexibility for partial cipher blocks
+ * being specified in a descriptor.
+ *
+ * It merely requires that every descriptor's length field is at least
+ * as large as the cipher block size. This means that a cipher block
+ * can span at most 2 descriptors. However, this does not allow a
+ * partial block to span into the final descriptor as that would
+ * violate the rule (since every descriptor's length must be at least
+ * the block size). So, for example, assuming an 8 byte block size:
+ *
+ * 0xe --> 0xa --> 0x8
+ *
+ * is a valid length sequence, whereas:
+ *
+ * 0xe --> 0xb --> 0x7
+ *
+ * is not a valid sequence.
+ */
+
+struct n2_cipher_alg {
+ struct list_head entry;
+ u8 enc_type;
+ struct crypto_alg alg;
+};
+
+static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+
+ return container_of(alg, struct n2_cipher_alg, alg);
+}
+
+struct n2_cipher_request_context {
+ struct ablkcipher_walk walk;
+};
+
+static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+
+ ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->enc_type |= ENC_TYPE_ALG_AES128;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->enc_type |= ENC_TYPE_ALG_AES192;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->enc_type |= ENC_TYPE_ALG_AES256;
+ break;
+ default:
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ctx->key_len = keylen;
+ memcpy(ctx->key.aes, key, keylen);
+ return 0;
+}
+
+static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ u32 tmp[DES_EXPKEY_WORDS];
+ int err;
+
+ ctx->enc_type = n2alg->enc_type;
+
+ if (keylen != DES_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ err = des_ekey(tmp, key);
+ if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+
+ ctx->key_len = keylen;
+ memcpy(ctx->key.des, key, keylen);
+ return 0;
+}
+
+static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+
+ ctx->enc_type = n2alg->enc_type;
+
+ if (keylen != (3 * DES_KEY_SIZE)) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ ctx->key_len = keylen;
+ memcpy(ctx->key.des3, key, keylen);
+ return 0;
+}
+
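+/* Standard RC4 key-scheduling algorithm; the resulting S-box and the X/Y
+ * indices are stored back-to-back in ctx->key.arc4.
+ */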
+static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ u8 *s = ctx->key.arc4;
+ u8 *x = s + 256;
+ u8 *y = x + 1;
+ int i, j, k;
+
+ ctx->enc_type = n2alg->enc_type;
+
+ j = k = 0;
+ *x = 0;
+ *y = 0;
+ for (i = 0; i < 256; i++)
+ s[i] = i;
+ for (i = 0; i < 256; i++) {
+ u8 a = s[i];
+ j = (j + key[k] + a) & 0xff;
+ s[i] = s[j];
+ s[j] = a;
+ if (++k >= keylen)
+ k = 0;
+ }
+
+ return 0;
+}
+
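+/* Trim the remaining byte count to a whole number of cipher blocks and cap
+ * it at the 2^16 byte per-descriptor limit.
+ */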
+static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
+{
+ int this_len = nbytes;
+
+ this_len -= (nbytes & (block_size - 1));
+ return this_len > (1 << 16) ? (1 << 16) : this_len;
+}
+
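+/* Encode one chunk (up to N2_CHUNK_ARR_LEN scatterlist segments) as a chain
+ * of CWQ descriptors and submit it to the queue.
+ */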
+static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
+ struct spu_queue *qp, bool encrypt)
+{
+ struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct cwq_initial_entry *ent;
+ bool in_place;
+ int i;
+
+ ent = spu_queue_alloc(qp, cp->arr_len);
+ if (!ent) {
+ pr_info("queue_alloc() of %d fails\n",
+ cp->arr_len);
+ return -EBUSY;
+ }
+
+ in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
+
+ ent->control = control_word_base(cp->arr[0].src_len,
+ 0, ctx->enc_type, 0, 0,
+ false, true, false, encrypt,
+ OPCODE_ENCRYPT |
+ (in_place ? OPCODE_INPLACE_BIT : 0));
+ ent->src_addr = cp->arr[0].src_paddr;
+ ent->auth_key_addr = 0UL;
+ ent->auth_iv_addr = 0UL;
+ ent->final_auth_state_addr = 0UL;
+ ent->enc_key_addr = __pa(&ctx->key);
+ ent->enc_iv_addr = cp->iv_paddr;
+ ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
+
+ for (i = 1; i < cp->arr_len; i++) {
+ ent = spu_queue_next(qp, ent);
+
+ ent->control = cp->arr[i].src_len - 1;
+ ent->src_addr = cp->arr[i].src_paddr;
+ ent->auth_key_addr = 0UL;
+ ent->auth_iv_addr = 0UL;
+ ent->final_auth_state_addr = 0UL;
+ ent->enc_key_addr = 0UL;
+ ent->enc_iv_addr = 0UL;
+ ent->dest_addr = 0UL;
+ }
+ ent->control |= CONTROL_END_OF_BLOCK;
+
+ return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
+}
+
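+/* Walk the request's scatterlists and group the segments into chunks that
+ * respect the N2_CHUNK_ARR_LEN and 2^16 byte limits, splitting whenever the
+ * in-place property or the destination contiguity changes.
+ */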
+static int n2_compute_chunks(struct ablkcipher_request *req)
+{
+ struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+ struct ablkcipher_walk *walk = &rctx->walk;
+ struct n2_crypto_chunk *chunk;
+ unsigned long dest_prev;
+ unsigned int tot_len;
+ bool prev_in_place;
+ int err, nbytes;
+
+ ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
+ err = ablkcipher_walk_phys(req, walk);
+ if (err)
+ return err;
+
+ INIT_LIST_HEAD(&rctx->chunk_list);
+
+ chunk = &rctx->chunk;
+ INIT_LIST_HEAD(&chunk->entry);
+
+ chunk->iv_paddr = 0UL;
+ chunk->arr_len = 0;
+ chunk->dest_paddr = 0UL;
+
+ prev_in_place = false;
+ dest_prev = ~0UL;
+ tot_len = 0;
+
+ while ((nbytes = walk->nbytes) != 0) {
+ unsigned long dest_paddr, src_paddr;
+ bool in_place;
+ int this_len;
+
+ src_paddr = (page_to_phys(walk->src.page) +
+ walk->src.offset);
+ dest_paddr = (page_to_phys(walk->dst.page) +
+ walk->dst.offset);
+ in_place = (src_paddr == dest_paddr);
+ this_len = cipher_descriptor_len(nbytes, walk->blocksize);
+
+ if (chunk->arr_len != 0) {
+ if (in_place != prev_in_place ||
+ (!prev_in_place &&
+ dest_paddr != dest_prev) ||
+ chunk->arr_len == N2_CHUNK_ARR_LEN ||
+ tot_len + this_len > (1 << 16)) {
+ chunk->dest_final = dest_prev;
+ list_add_tail(&chunk->entry,
+ &rctx->chunk_list);
+ chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
+ if (!chunk) {
+ err = -ENOMEM;
+ break;
+ }
+ INIT_LIST_HEAD(&chunk->entry);
+ }
+ }
+ if (chunk->arr_len == 0) {
+ chunk->dest_paddr = dest_paddr;
+ tot_len = 0;
+ }
+ chunk->arr[chunk->arr_len].src_paddr = src_paddr;
+ chunk->arr[chunk->arr_len].src_len = this_len;
+ chunk->arr_len++;
+
+ dest_prev = dest_paddr + this_len;
+ prev_in_place = in_place;
+ tot_len += this_len;
+
+ err = ablkcipher_walk_done(req, walk, nbytes - this_len);
+ if (err)
+ break;
+ }
+ if (!err && chunk->arr_len != 0) {
+ chunk->dest_final = dest_prev;
+ list_add_tail(&chunk->entry, &rctx->chunk_list);
+ }
+
+ return err;
+}
+
+static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
+{
+ struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+ struct n2_crypto_chunk *c, *tmp;
+
+ if (final_iv)
+ memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
+
+ ablkcipher_walk_complete(&rctx->walk);
+ list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
+ list_del(&c->entry);
+ if (unlikely(c != &rctx->chunk))
+ kfree(c);
+ }
+
+}
+
+static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
+{
+ struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+ struct crypto_tfm *tfm = req->base.tfm;
+ int err = n2_compute_chunks(req);
+ struct n2_crypto_chunk *c, *tmp;
+ unsigned long flags, hv_ret;
+ struct spu_queue *qp;
+
+ if (err)
+ return err;
+
+ qp = cpu_to_cwq[get_cpu()];
+ err = -ENODEV;
+ if (!qp)
+ goto out;
+
+ spin_lock_irqsave(&qp->lock, flags);
+
+ list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
+ err = __n2_crypt_chunk(tfm, c, qp, encrypt);
+ if (err)
+ break;
+ list_del(&c->entry);
+ if (unlikely(c != &rctx->chunk))
+ kfree(c);
+ }
+ if (!err) {
+ hv_ret = wait_for_tail(qp);
+ if (hv_ret != HV_EOK)
+ err = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&qp->lock, flags);
+
+out:
+ put_cpu();
+
+ n2_chunk_complete(req, NULL);
+ return err;
+}
+
+static int n2_encrypt_ecb(struct ablkcipher_request *req)
+{
+ return n2_do_ecb(req, true);
+}
+
+static int n2_decrypt_ecb(struct ablkcipher_request *req)
+{
+ return n2_do_ecb(req, false);
+}
+
+static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
+{
+ struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+ struct crypto_tfm *tfm = req->base.tfm;
+ unsigned long flags, hv_ret, iv_paddr;
+ int err = n2_compute_chunks(req);
+ struct n2_crypto_chunk *c, *tmp;
+ struct spu_queue *qp;
+ void *final_iv_addr;
+
+ final_iv_addr = NULL;
+
+ if (err)
+ return err;
+
+ qp = cpu_to_cwq[get_cpu()];
+ err = -ENODEV;
+ if (!qp)
+ goto out;
+
+ spin_lock_irqsave(&qp->lock, flags);
+
+ if (encrypt) {
+ iv_paddr = __pa(rctx->walk.iv);
+ list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
+ entry) {
+ c->iv_paddr = iv_paddr;
+ err = __n2_crypt_chunk(tfm, c, qp, true);
+ if (err)
+ break;
+ iv_paddr = c->dest_final - rctx->walk.blocksize;
+ list_del(&c->entry);
+ if (unlikely(c != &rctx->chunk))
+ kfree(c);
+ }
+ final_iv_addr = __va(iv_paddr);
+ } else {
+ list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
+ entry) {
+ if (c == &rctx->chunk) {
+ iv_paddr = __pa(rctx->walk.iv);
+ } else {
+ iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
+ tmp->arr[tmp->arr_len-1].src_len -
+ rctx->walk.blocksize);
+ }
+ if (!final_iv_addr) {
+ unsigned long pa;
+
+ pa = (c->arr[c->arr_len-1].src_paddr +
+ c->arr[c->arr_len-1].src_len -
+ rctx->walk.blocksize);
+ final_iv_addr = rctx->temp_iv;
+ memcpy(rctx->temp_iv, __va(pa),
+ rctx->walk.blocksize);
+ }
+ c->iv_paddr = iv_paddr;
+ err = __n2_crypt_chunk(tfm, c, qp, false);
+ if (err)
+ break;
+ list_del(&c->entry);
+ if (unlikely(c != &rctx->chunk))
+ kfree(c);
+ }
+ }
+ if (!err) {
+ hv_ret = wait_for_tail(qp);
+ if (hv_ret != HV_EOK)
+ err = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&qp->lock, flags);
+
+out:
+ put_cpu();
+
+ n2_chunk_complete(req, err ? NULL : final_iv_addr);
+ return err;
+}
+
+static int n2_encrypt_chaining(struct ablkcipher_request *req)
+{
+ return n2_do_chaining(req, true);
+}
+
+static int n2_decrypt_chaining(struct ablkcipher_request *req)
+{
+ return n2_do_chaining(req, false);
+}
+
+struct n2_cipher_tmpl {
+ const char *name;
+ const char *drv_name;
+ u8 block_size;
+ u8 enc_type;
+ struct ablkcipher_alg ablkcipher;
+};
+
+static const struct n2_cipher_tmpl cipher_tmpls[] = {
+ /* ARC4: only ECB is supported (chaining bits ignored) */
+ { .name = "ecb(arc4)",
+ .drv_name = "ecb-arc4",
+ .block_size = 1,
+ .enc_type = (ENC_TYPE_ALG_RC4_STREAM |
+ ENC_TYPE_CHAINING_ECB),
+ .ablkcipher = {
+ .min_keysize = 1,
+ .max_keysize = 256,
+ .setkey = n2_arc4_setkey,
+ .encrypt = n2_encrypt_ecb,
+ .decrypt = n2_decrypt_ecb,
+ },
+ },
+
+ /* DES: ECB, CBC and CFB are supported */
+ { .name = "ecb(des)",
+ .drv_name = "ecb-des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_DES |
+ ENC_TYPE_CHAINING_ECB),
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = n2_des_setkey,
+ .encrypt = n2_encrypt_ecb,
+ .decrypt = n2_decrypt_ecb,
+ },
+ },
+ { .name = "cbc(des)",
+ .drv_name = "cbc-des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_DES |
+ ENC_TYPE_CHAINING_CBC),
+ .ablkcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = n2_des_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_decrypt_chaining,
+ },
+ },
+ { .name = "cfb(des)",
+ .drv_name = "cfb-des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_DES |
+ ENC_TYPE_CHAINING_CFB),
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = n2_des_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_decrypt_chaining,
+ },
+ },
+
+ /* 3DES: ECB, CBC and CFB are supported */
+ { .name = "ecb(des3_ede)",
+ .drv_name = "ecb-3des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_3DES |
+ ENC_TYPE_CHAINING_ECB),
+ .ablkcipher = {
+ .min_keysize = 3 * DES_KEY_SIZE,
+ .max_keysize = 3 * DES_KEY_SIZE,
+ .setkey = n2_3des_setkey,
+ .encrypt = n2_encrypt_ecb,
+ .decrypt = n2_decrypt_ecb,
+ },
+ },
+ { .name = "cbc(des3_ede)",
+ .drv_name = "cbc-3des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_3DES |
+ ENC_TYPE_CHAINING_CBC),
+ .ablkcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = 3 * DES_KEY_SIZE,
+ .max_keysize = 3 * DES_KEY_SIZE,
+ .setkey = n2_3des_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_decrypt_chaining,
+ },
+ },
+ { .name = "cfb(des3_ede)",
+ .drv_name = "cfb-3des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_3DES |
+ ENC_TYPE_CHAINING_CFB),
+ .ablkcipher = {
+ .min_keysize = 3 * DES_KEY_SIZE,
+ .max_keysize = 3 * DES_KEY_SIZE,
+ .setkey = n2_3des_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_decrypt_chaining,
+ },
+ },
+ /* AES: ECB, CBC and CTR are supported */
+ { .name = "ecb(aes)",
+ .drv_name = "ecb-aes",
+ .block_size = AES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_AES128 |
+ ENC_TYPE_CHAINING_ECB),
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = n2_aes_setkey,
+ .encrypt = n2_encrypt_ecb,
+ .decrypt = n2_decrypt_ecb,
+ },
+ },
+ { .name = "cbc(aes)",
+ .drv_name = "cbc-aes",
+ .block_size = AES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_AES128 |
+ ENC_TYPE_CHAINING_CBC),
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = n2_aes_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_decrypt_chaining,
+ },
+ },
+ { .name = "ctr(aes)",
+ .drv_name = "ctr-aes",
+ .block_size = AES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_AES128 |
+ ENC_TYPE_CHAINING_COUNTER),
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = n2_aes_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_encrypt_chaining,
+ },
+ },
+
+};
+#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
+
+static LIST_HEAD(cipher_algs);
+
+struct n2_hash_tmpl {
+ const char *name;
+ int (*digest)(struct ahash_request *req);
+ u8 digest_size;
+ u8 block_size;
+};
+static const struct n2_hash_tmpl hash_tmpls[] = {
+ { .name = "md5",
+ .digest = n2_md5_async_digest,
+ .digest_size = MD5_DIGEST_SIZE,
+ .block_size = MD5_HMAC_BLOCK_SIZE },
+ { .name = "sha1",
+ .digest = n2_sha1_async_digest,
+ .digest_size = SHA1_DIGEST_SIZE,
+ .block_size = SHA1_BLOCK_SIZE },
+ { .name = "sha256",
+ .digest = n2_sha256_async_digest,
+ .digest_size = SHA256_DIGEST_SIZE,
+ .block_size = SHA256_BLOCK_SIZE },
+ { .name = "sha224",
+ .digest = n2_sha224_async_digest,
+ .digest_size = SHA224_DIGEST_SIZE,
+ .block_size = SHA224_BLOCK_SIZE },
+};
+#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
+
+struct n2_ahash_alg {
+ struct list_head entry;
+ struct ahash_alg alg;
+};
+static LIST_HEAD(ahash_algs);
+
+static int algs_registered;
+
+static void __n2_unregister_algs(void)
+{
+ struct n2_cipher_alg *cipher, *cipher_tmp;
+ struct n2_ahash_alg *alg, *alg_tmp;
+
+ list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
+ crypto_unregister_alg(&cipher->alg);
+ list_del(&cipher->entry);
+ kfree(cipher);
+ }
+ list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
+ crypto_unregister_ahash(&alg->alg);
+ list_del(&alg->entry);
+ kfree(alg);
+ }
+}
+
+static int n2_cipher_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
+ return 0;
+}
+
+static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
+{
+ struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+ struct crypto_alg *alg;
+ int err;
+
+ if (!p)
+ return -ENOMEM;
+
+ alg = &p->alg;
+
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
+ alg->cra_priority = N2_CRA_PRIORITY;
+ alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+ alg->cra_blocksize = tmpl->block_size;
+ p->enc_type = tmpl->enc_type;
+ alg->cra_ctxsize = sizeof(struct n2_cipher_context);
+ alg->cra_type = &crypto_ablkcipher_type;
+ alg->cra_u.ablkcipher = tmpl->ablkcipher;
+ alg->cra_init = n2_cipher_cra_init;
+ alg->cra_module = THIS_MODULE;
+
+ list_add(&p->entry, &cipher_algs);
+ err = crypto_register_alg(alg);
+ if (err) {
+ list_del(&p->entry);
+ kfree(p);
+ }
+ return err;
+}
+
+static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
+{
+ struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+ struct hash_alg_common *halg;
+ struct crypto_alg *base;
+ struct ahash_alg *ahash;
+ int err;
+
+ if (!p)
+ return -ENOMEM;
+
+ ahash = &p->alg;
+ ahash->init = n2_hash_async_init;
+ ahash->update = n2_hash_async_update;
+ ahash->final = n2_hash_async_final;
+ ahash->finup = n2_hash_async_finup;
+ ahash->digest = tmpl->digest;
+
+ halg = &ahash->halg;
+ halg->digestsize = tmpl->digest_size;
+
+ base = &halg->base;
+ snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+ snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
+ base->cra_priority = N2_CRA_PRIORITY;
+ base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK;
+ base->cra_blocksize = tmpl->block_size;
+ base->cra_ctxsize = sizeof(struct n2_hash_ctx);
+ base->cra_module = THIS_MODULE;
+ base->cra_init = n2_hash_cra_init;
+ base->cra_exit = n2_hash_cra_exit;
+
+ list_add(&p->entry, &ahash_algs);
+ err = crypto_register_ahash(ahash);
+ if (err) {
+ list_del(&p->entry);
+ kfree(p);
+ }
+ return err;
+}
+
+static int __devinit n2_register_algs(void)
+{
+ int i, err = 0;
+
+ mutex_lock(&spu_lock);
+ if (algs_registered++)
+ goto out;
+
+ for (i = 0; i < NUM_HASH_TMPLS; i++) {
+ err = __n2_register_one_ahash(&hash_tmpls[i]);
+ if (err) {
+ __n2_unregister_algs();
+ goto out;
+ }
+ }
+ for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
+ err = __n2_register_one_cipher(&cipher_tmpls[i]);
+ if (err) {
+ __n2_unregister_algs();
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&spu_lock);
+ return err;
+}
+
+static void __exit n2_unregister_algs(void)
+{
+ mutex_lock(&spu_lock);
+ if (!--algs_registered)
+ __n2_unregister_algs();
+ mutex_unlock(&spu_lock);
+}
+
+/* To map CWQ queues to interrupt sources, the hypervisor API provides
+ * a devino. This isn't very useful to us because all of the
+ * interrupts listed in the of_device node have been translated to
+ * Linux virtual IRQ cookie numbers.
+ *
+ * So we have to back-translate, going through the 'intr' and 'ino'
+ * property tables of the n2cp MDESC node, matching it with the OF
+ * 'interrupts' property entries, in order to figure out which
+ * devino goes to which already-translated IRQ.
+ */
+static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
+ unsigned long dev_ino)
+{
+ const unsigned int *dev_intrs;
+ unsigned int intr;
+ int i;
+
+ for (i = 0; i < ip->num_intrs; i++) {
+ if (ip->ino_table[i].ino == dev_ino)
+ break;
+ }
+ if (i == ip->num_intrs)
+ return -ENODEV;
+
+ intr = ip->ino_table[i].intr;
+
+ dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
+ if (!dev_intrs)
+ return -ENODEV;
+
+ for (i = 0; i < dev->num_irqs; i++) {
+ if (dev_intrs[i] == intr)
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip,
+ const char *irq_name, struct spu_queue *p,
+ irq_handler_t handler)
+{
+ unsigned long herr;
+ int index;
+
+ herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
+ if (herr)
+ return -EINVAL;
+
+ index = find_devino_index(dev, ip, p->devino);
+ if (index < 0)
+ return index;
+
+ p->irq = dev->irqs[index];
+
+ sprintf(p->irq_name, "%s-%d", irq_name, index);
+
+ return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
+ p->irq_name, p);
+}
+
+static struct kmem_cache *queue_cache[2];
+
+static void *new_queue(unsigned long q_type)
+{
+ return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
+}
+
+static void free_queue(void *p, unsigned long q_type)
+{
+ return kmem_cache_free(queue_cache[q_type - 1], p);
+}
+
+static int queue_cache_init(void)
+{
+ if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
+ queue_cache[HV_NCS_QTYPE_MAU - 1] =
+ kmem_cache_create("mau_queue",
+ (MAU_NUM_ENTRIES *
+ MAU_ENTRY_SIZE),
+ MAU_ENTRY_SIZE, 0, NULL);
+ if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
+ return -ENOMEM;
+
+ if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
+ queue_cache[HV_NCS_QTYPE_CWQ - 1] =
+ kmem_cache_create("cwq_queue",
+ (CWQ_NUM_ENTRIES *
+ CWQ_ENTRY_SIZE),
+ CWQ_ENTRY_SIZE, 0, NULL);
+ if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
+ kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void queue_cache_destroy(void)
+{
+ kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+ kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+}
+
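+/* Bind the current thread to the queue's CPU sharing mask while issuing the
+ * qconf hypercall, then restore the previous affinity.
+ */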
+static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
+{
+ cpumask_var_t old_allowed;
+ unsigned long hv_ret;
+
+ if (cpumask_empty(&p->sharing))
+ return -EINVAL;
+
+ if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_copy(old_allowed, &current->cpus_allowed);
+
+ set_cpus_allowed_ptr(current, &p->sharing);
+
+ hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
+ CWQ_NUM_ENTRIES, &p->qhandle);
+ if (!hv_ret)
+ sun4v_ncs_sethead_marker(p->qhandle, 0);
+
+ set_cpus_allowed_ptr(current, old_allowed);
+
+ free_cpumask_var(old_allowed);
+
+ return (hv_ret ? -EINVAL : 0);
+}
+
+static int spu_queue_setup(struct spu_queue *p)
+{
+ int err;
+
+ p->q = new_queue(p->q_type);
+ if (!p->q)
+ return -ENOMEM;
+
+ err = spu_queue_register(p, p->q_type);
+ if (err) {
+ free_queue(p->q, p->q_type);
+ p->q = NULL;
+ }
+
+ return err;
+}
+
+static void spu_queue_destroy(struct spu_queue *p)
+{
+ unsigned long hv_ret;
+
+ if (!p->q)
+ return;
+
+ hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
+
+ if (!hv_ret)
+ free_queue(p->q, p->q_type);
+}
+
+static void spu_list_destroy(struct list_head *list)
+{
+ struct spu_queue *p, *n;
+
+ list_for_each_entry_safe(p, n, list, list) {
+ int i;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_to_cwq[i] == p)
+ cpu_to_cwq[i] = NULL;
+ }
+
+ if (p->irq) {
+ free_irq(p->irq, p);
+ p->irq = 0;
+ }
+ spu_queue_destroy(p);
+ list_del(&p->list);
+ kfree(p);
+ }
+}
+
+/* Walk the backward arcs of a CWQ 'exec-unit' node,
+ * gathering cpu membership information.
+ */
+static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
+ struct of_device *dev,
+ u64 node, struct spu_queue *p,
+ struct spu_queue **table)
+{
+ u64 arc;
+
+ mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
+ u64 tgt = mdesc_arc_target(mdesc, arc);
+ const char *name = mdesc_node_name(mdesc, tgt);
+ const u64 *id;
+
+ if (strcmp(name, "cpu"))
+ continue;
+ id = mdesc_get_property(mdesc, tgt, "id", NULL);
+ if (table[*id] != NULL) {
+ dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
+ dev->dev.of_node->full_name);
+ return -EINVAL;
+ }
+ cpu_set(*id, p->sharing);
+ table[*id] = p;
+ }
+ return 0;
+}
+
+/* Process an 'exec-unit' MDESC node of type 'cwq'. */
+static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
+ struct of_device *dev, struct mdesc_handle *mdesc,
+ u64 node, const char *iname, unsigned long q_type,
+ irq_handler_t handler, struct spu_queue **table)
+{
+ struct spu_queue *p;
+ int err;
+
+ p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
+ if (!p) {
+ dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
+ dev->dev.of_node->full_name);
+ return -ENOMEM;
+ }
+
+ cpus_clear(p->sharing);
+ spin_lock_init(&p->lock);
+ p->q_type = q_type;
+ INIT_LIST_HEAD(&p->jobs);
+ list_add(&p->list, list);
+
+ err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
+ if (err)
+ return err;
+
+ err = spu_queue_setup(p);
+ if (err)
+ return err;
+
+ return spu_map_ino(dev, ip, iname, p, handler);
+}
+
+static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct of_device *dev,
+ struct spu_mdesc_info *ip, struct list_head *list,
+ const char *exec_name, unsigned long q_type,
+ irq_handler_t handler, struct spu_queue **table)
+{
+ int err = 0;
+ u64 node;
+
+ mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
+ const char *type;
+
+ type = mdesc_get_property(mdesc, node, "type", NULL);
+ if (!type || strcmp(type, exec_name))
+ continue;
+
+ err = handle_exec_unit(ip, list, dev, mdesc, node,
+ exec_name, q_type, handler, table);
+ if (err) {
+ spu_list_destroy(list);
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
+ struct spu_mdesc_info *ip)
+{
+ const u64 *intr, *ino;
+ int intr_len, ino_len;
+ int i;
+
+ intr = mdesc_get_property(mdesc, node, "intr", &intr_len);
+ if (!intr)
+ return -ENODEV;
+
+ ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
+ if (!ino)
+ return -ENODEV;
+
+ if (intr_len != ino_len)
+ return -EINVAL;
+
+ ip->num_intrs = intr_len / sizeof(u64);
+ ip->ino_table = kzalloc((sizeof(struct ino_blob) *
+ ip->num_intrs),
+ GFP_KERNEL);
+ if (!ip->ino_table)
+ return -ENOMEM;
+
+ for (i = 0; i < ip->num_intrs; i++) {
+ struct ino_blob *b = &ip->ino_table[i];
+ b->intr = intr[i];
+ b->ino = ino[i];
+ }
+
+ return 0;
+}
+
+static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
+ struct of_device *dev,
+ struct spu_mdesc_info *ip,
+ const char *node_name)
+{
+ const unsigned int *reg;
+ u64 node;
+
+ reg = of_get_property(dev->dev.of_node, "reg", NULL);
+ if (!reg)
+ return -ENODEV;
+
+ mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
+ const char *name;
+ const u64 *chdl;
+
+ name = mdesc_get_property(mdesc, node, "name", NULL);
+ if (!name || strcmp(name, node_name))
+ continue;
+ chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
+ if (!chdl || (*chdl != *reg))
+ continue;
+ ip->cfg_handle = *chdl;
+ return get_irq_props(mdesc, node, ip);
+ }
+
+ return -ENODEV;
+}
+
+static unsigned long n2_spu_hvapi_major;
+static unsigned long n2_spu_hvapi_minor;
+
+static int __devinit n2_spu_hvapi_register(void)
+{
+ int err;
+
+ n2_spu_hvapi_major = 2;
+ n2_spu_hvapi_minor = 0;
+
+ err = sun4v_hvapi_register(HV_GRP_NCS,
+ n2_spu_hvapi_major,
+ &n2_spu_hvapi_minor);
+
+ if (!err)
+ pr_info("Registered NCS HVAPI version %lu.%lu\n",
+ n2_spu_hvapi_major,
+ n2_spu_hvapi_minor);
+
+ return err;
+}
+
+static void n2_spu_hvapi_unregister(void)
+{
+ sun4v_hvapi_unregister(HV_GRP_NCS);
+}
+
+static int global_ref;
+
+static int __devinit grab_global_resources(void)
+{
+ int err = 0;
+
+ mutex_lock(&spu_lock);
+
+ if (global_ref++)
+ goto out;
+
+ err = n2_spu_hvapi_register();
+ if (err)
+ goto out;
+
+ err = queue_cache_init();
+ if (err)
+ goto out_hvapi_release;
+
+ err = -ENOMEM;
+ cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
+ GFP_KERNEL);
+ if (!cpu_to_cwq)
+ goto out_queue_cache_destroy;
+
+ cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
+ GFP_KERNEL);
+ if (!cpu_to_mau)
+ goto out_free_cwq_table;
+
+ err = 0;
+
+out:
+ if (err)
+ global_ref--;
+ mutex_unlock(&spu_lock);
+ return err;
+
+out_free_cwq_table:
+ kfree(cpu_to_cwq);
+ cpu_to_cwq = NULL;
+
+out_queue_cache_destroy:
+ queue_cache_destroy();
+
+out_hvapi_release:
+ n2_spu_hvapi_unregister();
+ goto out;
+}
+
+static void release_global_resources(void)
+{
+ mutex_lock(&spu_lock);
+ if (!--global_ref) {
+ kfree(cpu_to_cwq);
+ cpu_to_cwq = NULL;
+
+ kfree(cpu_to_mau);
+ cpu_to_mau = NULL;
+
+ queue_cache_destroy();
+ n2_spu_hvapi_unregister();
+ }
+ mutex_unlock(&spu_lock);
+}
+
+static struct n2_crypto * __devinit alloc_n2cp(void)
+{
+ struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
+
+ if (np)
+ INIT_LIST_HEAD(&np->cwq_list);
+
+ return np;
+}
+
+static void free_n2cp(struct n2_crypto *np)
+{
+ if (np->cwq_info.ino_table) {
+ kfree(np->cwq_info.ino_table);
+ np->cwq_info.ino_table = NULL;
+ }
+
+ kfree(np);
+}
+
+static void __devinit n2_spu_driver_version(void)
+{
+ static int n2_spu_version_printed;
+
+ if (n2_spu_version_printed++ == 0)
+ pr_info("%s", version);
+}
+
+static int __devinit n2_crypto_probe(struct of_device *dev,
+ const struct of_device_id *match)
+{
+ struct mdesc_handle *mdesc;
+ const char *full_name;
+ struct n2_crypto *np;
+ int err;
+
+ n2_spu_driver_version();
+
+ full_name = dev->dev.of_node->full_name;
+ pr_info("Found N2CP at %s\n", full_name);
+
+ np = alloc_n2cp();
+ if (!np) {
+ dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
+ full_name);
+ return -ENOMEM;
+ }
+
+ err = grab_global_resources();
+ if (err) {
+ dev_err(&dev->dev, "%s: Unable to grab "
+ "global resources.\n", full_name);
+ goto out_free_n2cp;
+ }
+
+ mdesc = mdesc_grab();
+
+ if (!mdesc) {
+ dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
+ full_name);
+ err = -ENODEV;
+ goto out_free_global;
+ }
+ err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
+ if (err) {
+ dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
+ full_name);
+ mdesc_release(mdesc);
+ goto out_free_global;
+ }
+
+ err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
+ "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
+ cpu_to_cwq);
+ mdesc_release(mdesc);
+
+ if (err) {
+ dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
+ full_name);
+ goto out_free_global;
+ }
+
+ err = n2_register_algs();
+ if (err) {
+ dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
+ full_name);
+ goto out_free_spu_list;
+ }
+
+ dev_set_drvdata(&dev->dev, np);
+
+ return 0;
+
+out_free_spu_list:
+ spu_list_destroy(&np->cwq_list);
+
+out_free_global:
+ release_global_resources();
+
+out_free_n2cp:
+ free_n2cp(np);
+
+ return err;
+}
+
+static int __devexit n2_crypto_remove(struct of_device *dev)
+{
+ struct n2_crypto *np = dev_get_drvdata(&dev->dev);
+
+ n2_unregister_algs();
+
+ spu_list_destroy(&np->cwq_list);
+
+ release_global_resources();
+
+ free_n2cp(np);
+
+ return 0;
+}
+
+static struct n2_mau * __devinit alloc_ncp(void)
+{
+ struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
+
+ if (mp)
+ INIT_LIST_HEAD(&mp->mau_list);
+
+ return mp;
+}
+
+static void free_ncp(struct n2_mau *mp)
+{
+ if (mp->mau_info.ino_table) {
+ kfree(mp->mau_info.ino_table);
+ mp->mau_info.ino_table = NULL;
+ }
+
+ kfree(mp);
+}
+
+static int __devinit n2_mau_probe(struct of_device *dev,
+ const struct of_device_id *match)
+{
+ struct mdesc_handle *mdesc;
+ const char *full_name;
+ struct n2_mau *mp;
+ int err;
+
+ n2_spu_driver_version();
+
+ full_name = dev->dev.of_node->full_name;
+ pr_info("Found NCP at %s\n", full_name);
+
+ mp = alloc_ncp();
+ if (!mp) {
+ dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
+ full_name);
+ return -ENOMEM;
+ }
+
+ err = grab_global_resources();
+ if (err) {
+ dev_err(&dev->dev, "%s: Unable to grab "
+ "global resources.\n", full_name);
+ goto out_free_ncp;
+ }
+
+ mdesc = mdesc_grab();
+
+ if (!mdesc) {
+ dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
+ full_name);
+ err = -ENODEV;
+ goto out_free_global;
+ }
+
+ err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
+ if (err) {
+ dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
+ full_name);
+ mdesc_release(mdesc);
+ goto out_free_global;
+ }
+
+ err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
+ "mau", HV_NCS_QTYPE_MAU, mau_intr,
+ cpu_to_mau);
+ mdesc_release(mdesc);
+
+ if (err) {
+ dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
+ full_name);
+ goto out_free_global;
+ }
+
+ dev_set_drvdata(&dev->dev, mp);
+
+ return 0;
+
+out_free_global:
+ release_global_resources();
+
+out_free_ncp:
+ free_ncp(mp);
+
+ return err;
+}
+
+static int __devexit n2_mau_remove(struct of_device *dev)
+{
+ struct n2_mau *mp = dev_get_drvdata(&dev->dev);
+
+ spu_list_destroy(&mp->mau_list);
+
+ release_global_resources();
+
+ free_ncp(mp);
+
+ return 0;
+}
+
+static struct of_device_id n2_crypto_match[] = {
+ {
+ .name = "n2cp",
+ .compatible = "SUNW,n2-cwq",
+ },
+ {
+ .name = "n2cp",
+ .compatible = "SUNW,vf-cwq",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, n2_crypto_match);
+
+static struct of_platform_driver n2_crypto_driver = {
+ .driver = {
+ .name = "n2cp",
+ .owner = THIS_MODULE,
+ .of_match_table = n2_crypto_match,
+ },
+ .probe = n2_crypto_probe,
+ .remove = __devexit_p(n2_crypto_remove),
+};
+
+static struct of_device_id n2_mau_match[] = {
+ {
+ .name = "ncp",
+ .compatible = "SUNW,n2-mau",
+ },
+ {
+ .name = "ncp",
+ .compatible = "SUNW,vf-mau",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, n2_mau_match);
+
+static struct of_platform_driver n2_mau_driver = {
+ .driver = {
+ .name = "ncp",
+ .owner = THIS_MODULE,
+ .of_match_table = n2_mau_match,
+ },
+ .probe = n2_mau_probe,
+ .remove = __devexit_p(n2_mau_remove),
+};
+
+static int __init n2_init(void)
+{
+ int err = of_register_driver(&n2_crypto_driver, &of_bus_type);
+
+ if (!err) {
+ err = of_register_driver(&n2_mau_driver, &of_bus_type);
+ if (err)
+ of_unregister_driver(&n2_crypto_driver);
+ }
+ return err;
+}
+
+static void __exit n2_exit(void)
+{
+ of_unregister_driver(&n2_mau_driver);
+ of_unregister_driver(&n2_crypto_driver);
+}
+
+module_init(n2_init);
+module_exit(n2_exit);
diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h
new file mode 100644
index 000000000000..4bcbbeae98f5
--- /dev/null
+++ b/drivers/crypto/n2_core.h
@@ -0,0 +1,231 @@
+#ifndef _N2_CORE_H
+#define _N2_CORE_H
+
+#ifndef __ASSEMBLY__
+
+struct ino_blob {
+ u64 intr;
+ u64 ino;
+};
+
+struct spu_mdesc_info {
+ u64 cfg_handle;
+ struct ino_blob *ino_table;
+ int num_intrs;
+};
+
+struct n2_crypto {
+ struct spu_mdesc_info cwq_info;
+ struct list_head cwq_list;
+};
+
+struct n2_mau {
+ struct spu_mdesc_info mau_info;
+ struct list_head mau_list;
+};
+
+#define CWQ_ENTRY_SIZE 64
+#define CWQ_NUM_ENTRIES 64
+
+#define MAU_ENTRY_SIZE 64
+#define MAU_NUM_ENTRIES 64
+
+struct cwq_initial_entry {
+ u64 control;
+ u64 src_addr;
+ u64 auth_key_addr;
+ u64 auth_iv_addr;
+ u64 final_auth_state_addr;
+ u64 enc_key_addr;
+ u64 enc_iv_addr;
+ u64 dest_addr;
+};
+
+struct cwq_ext_entry {
+ u64 len;
+ u64 src_addr;
+ u64 resv1;
+ u64 resv2;
+ u64 resv3;
+ u64 resv4;
+ u64 resv5;
+ u64 resv6;
+};
+
+struct cwq_final_entry {
+ u64 control;
+ u64 src_addr;
+ u64 resv1;
+ u64 resv2;
+ u64 resv3;
+ u64 resv4;
+ u64 resv5;
+ u64 resv6;
+};
+
+#define CONTROL_LEN 0x000000000000ffffULL
+#define CONTROL_LEN_SHIFT 0
+#define CONTROL_HMAC_KEY_LEN 0x0000000000ff0000ULL
+#define CONTROL_HMAC_KEY_LEN_SHIFT 16
+#define CONTROL_ENC_TYPE 0x00000000ff000000ULL
+#define CONTROL_ENC_TYPE_SHIFT 24
+#define ENC_TYPE_ALG_RC4_STREAM 0x00ULL
+#define ENC_TYPE_ALG_RC4_NOSTREAM 0x04ULL
+#define ENC_TYPE_ALG_DES 0x08ULL
+#define ENC_TYPE_ALG_3DES 0x0cULL
+#define ENC_TYPE_ALG_AES128 0x10ULL
+#define ENC_TYPE_ALG_AES192 0x14ULL
+#define ENC_TYPE_ALG_AES256 0x18ULL
+#define ENC_TYPE_ALG_RESERVED 0x1cULL
+#define ENC_TYPE_ALG_MASK 0x1cULL
+#define ENC_TYPE_CHAINING_ECB 0x00ULL
+#define ENC_TYPE_CHAINING_CBC 0x01ULL
+#define ENC_TYPE_CHAINING_CFB 0x02ULL
+#define ENC_TYPE_CHAINING_COUNTER 0x03ULL
+#define ENC_TYPE_CHAINING_MASK 0x03ULL
+#define CONTROL_AUTH_TYPE 0x0000001f00000000ULL
+#define CONTROL_AUTH_TYPE_SHIFT 32
+#define AUTH_TYPE_RESERVED 0x00ULL
+#define AUTH_TYPE_MD5 0x01ULL
+#define AUTH_TYPE_SHA1 0x02ULL
+#define AUTH_TYPE_SHA256 0x03ULL
+#define AUTH_TYPE_CRC32 0x04ULL
+#define AUTH_TYPE_HMAC_MD5 0x05ULL
+#define AUTH_TYPE_HMAC_SHA1 0x06ULL
+#define AUTH_TYPE_HMAC_SHA256 0x07ULL
+#define AUTH_TYPE_TCP_CHECKSUM 0x08ULL
+#define AUTH_TYPE_SSL_HMAC_MD5 0x09ULL
+#define AUTH_TYPE_SSL_HMAC_SHA1 0x0aULL
+#define AUTH_TYPE_SSL_HMAC_SHA256 0x0bULL
+#define CONTROL_STRAND 0x000000e000000000ULL
+#define CONTROL_STRAND_SHIFT 37
+#define CONTROL_HASH_LEN 0x0000ff0000000000ULL
+#define CONTROL_HASH_LEN_SHIFT 40
+#define CONTROL_INTERRUPT 0x0001000000000000ULL
+#define CONTROL_STORE_FINAL_AUTH_STATE 0x0002000000000000ULL
+#define CONTROL_RESERVED 0x001c000000000000ULL
+#define CONTROL_HV_DONE 0x0004000000000000ULL
+#define CONTROL_HV_PROTOCOL_ERROR 0x0008000000000000ULL
+#define CONTROL_HV_HARDWARE_ERROR 0x0010000000000000ULL
+#define CONTROL_END_OF_BLOCK 0x0020000000000000ULL
+#define CONTROL_START_OF_BLOCK 0x0040000000000000ULL
+#define CONTROL_ENCRYPT 0x0080000000000000ULL
+#define CONTROL_OPCODE 0xff00000000000000ULL
+#define CONTROL_OPCODE_SHIFT 56
+#define OPCODE_INPLACE_BIT 0x80ULL
+#define OPCODE_SSL_KEYBLOCK 0x10ULL
+#define OPCODE_COPY 0x20ULL
+#define OPCODE_ENCRYPT 0x40ULL
+#define OPCODE_AUTH_MAC 0x41ULL
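/*
 * Editorial illustration (not part of this patch): one plausible way to
 * compose the 64-bit control word of a cwq_initial_entry from the field
 * definitions above, here for a single-block AES-128/CBC encrypt request
 * that interrupts on completion.  Whether the LEN field holds the byte
 * count or the count minus one is an assumption here; consult the CWQ
 * hardware documentation (or n2_core.c) for the authoritative encoding.
 */
static inline u64 example_cwq_control_word(unsigned int nbytes)
{
	u64 control = 0;

	control |= ((u64)nbytes << CONTROL_LEN_SHIFT) & CONTROL_LEN;
	control |= (ENC_TYPE_ALG_AES128 | ENC_TYPE_CHAINING_CBC)
			<< CONTROL_ENC_TYPE_SHIFT;
	control |= OPCODE_ENCRYPT << CONTROL_OPCODE_SHIFT;
	control |= CONTROL_ENCRYPT;			/* encrypt, not decrypt */
	control |= CONTROL_START_OF_BLOCK | CONTROL_END_OF_BLOCK;
	control |= CONTROL_INTERRUPT;			/* notify on completion */

	return control;
}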
+
+#endif /* !(__ASSEMBLY__) */
+
+/* NCS v2.0 hypervisor interfaces */
+#define HV_NCS_QTYPE_MAU 0x01
+#define HV_NCS_QTYPE_CWQ 0x02
+
+/* ncs_qconf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_QCONF
+ * ARG0: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
+ * ARG1: Real address of queue, or handle for unconfigure
+ * ARG2: Number of entries in queue, zero for unconfigure
+ * RET0: status
+ * RET1: queue handle
+ *
+ * Configure a queue in the stream processing unit.
+ *
+ * The real address given as the base must be 64-byte
+ * aligned.
+ *
+ * The queue size can range from a minimum of 2 to a maximum
+ * of 64. The queue size must be a power of two.
+ *
+ * To unconfigure a queue, specify a length of zero and place
+ * the queue handle into ARG1.
+ *
+ * On configure success the hypervisor will set the FIRST, HEAD,
+ * and TAIL registers to the address of the first entry in the
+ * queue. The LAST register will be set to point to the last
+ * entry in the queue.
+ */
+#define HV_FAST_NCS_QCONF 0x111
+
+/* ncs_qinfo()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_QINFO
+ * ARG0: Queue handle
+ * RET0: status
+ * RET1: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
+ * RET2: Queue base address
+ * RET3: Number of entries
+ */
+#define HV_FAST_NCS_QINFO 0x112
+
+/* ncs_gethead()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_GETHEAD
+ * ARG0: Queue handle
+ * RET0: status
+ * RET1: queue head offset
+ */
+#define HV_FAST_NCS_GETHEAD 0x113
+
+/* ncs_gettail()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_GETTAIL
+ * ARG0: Queue handle
+ * RET0: status
+ * RET1: queue tail offset
+ */
+#define HV_FAST_NCS_GETTAIL 0x114
+
+/* ncs_settail()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_SETTAIL
+ * ARG0: Queue handle
+ * ARG1: New tail offset
+ * RET0: status
+ */
+#define HV_FAST_NCS_SETTAIL 0x115
+
+/* ncs_qhandle_to_devino()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_QHANDLE_TO_DEVINO
+ * ARG0: Queue handle
+ * RET0: status
+ * RET1: devino
+ */
+#define HV_FAST_NCS_QHANDLE_TO_DEVINO 0x116
+
+/* ncs_sethead_marker()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_SETHEAD_MARKER
+ * ARG0: Queue handle
+ * ARG1: New head offset
+ * RET0: status
+ */
+#define HV_FAST_NCS_SETHEAD_MARKER 0x117
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
+ unsigned long queue_ra,
+ unsigned long num_entries,
+ unsigned long *qhandle);
+extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle,
+ unsigned long *queue_type,
+ unsigned long *queue_ra,
+ unsigned long *num_entries);
+extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
+ unsigned long *head);
+extern unsigned long sun4v_ncs_gettail(unsigned long qhandle,
+ unsigned long *tail);
+extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
+ unsigned long tail);
+extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle,
+ unsigned long *devino);
+extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
+ unsigned long head);
+#endif /* !(__ASSEMBLY__) */
+
+#endif /* _N2_CORE_H */
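/*
 * Editorial illustration (not part of this patch): a minimal sketch of how
 * the NCS hypervisor calls documented above fit together when bringing up
 * and tearing down a queue.  It assumes the usual sun4v convention that a
 * zero status return means success; see n2_core.c for the real usage.
 */
static int example_configure_queue(unsigned long qtype, unsigned long queue_ra,
				   unsigned long num_entries)
{
	unsigned long hv_ret, qhandle, devino;

	/* queue_ra must be 64-byte aligned; num_entries a power of two (2..64) */
	hv_ret = sun4v_ncs_qconf(qtype, queue_ra, num_entries, &qhandle);
	if (hv_ret)
		return -ENODEV;

	/* translate the queue handle into its device interrupt number */
	hv_ret = sun4v_ncs_qhandle_to_devino(qhandle, &devino);
	if (hv_ret) {
		/* unconfigure: pass the handle in ARG1 and zero entries */
		sun4v_ncs_qconf(qtype, qhandle, 0, &qhandle);
		return -ENODEV;
	}

	return 0;
}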
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
new file mode 100644
index 000000000000..8b034337793f
--- /dev/null
+++ b/drivers/crypto/omap-sham.c
@@ -0,0 +1,1259 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for OMAP SHA1/MD5 HW acceleration.
+ *
+ * Copyright (c) 2010 Nokia Corporation
+ * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Some ideas are from old omap-sha1-md5.c driver.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/version.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+
+#include <plat/cpu.h>
+#include <plat/dma.h>
+#include <mach/irqs.h>
+
+#define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04))
+#define SHA_REG_DIN(x) (0x1C + ((x) * 0x04))
+
+#define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE
+#define MD5_DIGEST_SIZE 16
+
+#define SHA_REG_DIGCNT 0x14
+
+#define SHA_REG_CTRL 0x18
+#define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5)
+#define SHA_REG_CTRL_CLOSE_HASH (1 << 4)
+#define SHA_REG_CTRL_ALGO_CONST (1 << 3)
+#define SHA_REG_CTRL_ALGO (1 << 2)
+#define SHA_REG_CTRL_INPUT_READY (1 << 1)
+#define SHA_REG_CTRL_OUTPUT_READY (1 << 0)
+
+#define SHA_REG_REV 0x5C
+#define SHA_REG_REV_MAJOR 0xF0
+#define SHA_REG_REV_MINOR 0x0F
+
+#define SHA_REG_MASK 0x60
+#define SHA_REG_MASK_DMA_EN (1 << 3)
+#define SHA_REG_MASK_IT_EN (1 << 2)
+#define SHA_REG_MASK_SOFTRESET (1 << 1)
+#define SHA_REG_AUTOIDLE (1 << 0)
+
+#define SHA_REG_SYSSTATUS 0x64
+#define SHA_REG_SYSSTATUS_RESETDONE (1 << 0)
+
+#define DEFAULT_TIMEOUT_INTERVAL HZ
+
+#define FLAGS_FIRST 0x0001
+#define FLAGS_FINUP 0x0002
+#define FLAGS_FINAL 0x0004
+#define FLAGS_FAST 0x0008
+#define FLAGS_SHA1 0x0010
+#define FLAGS_DMA_ACTIVE 0x0020
+#define FLAGS_OUTPUT_READY 0x0040
+#define FLAGS_CLEAN 0x0080
+#define FLAGS_INIT 0x0100
+#define FLAGS_CPU 0x0200
+#define FLAGS_HMAC 0x0400
+
+/* 3rd byte: a bit number (not a mask), used with test_and_set_bit()/clear_bit() */
+#define FLAGS_BUSY 16
+
+#define OP_UPDATE 1
+#define OP_FINAL 2
+
+struct omap_sham_dev;
+
+struct omap_sham_reqctx {
+ struct omap_sham_dev *dd;
+ unsigned long flags;
+ unsigned long op;
+
+ size_t digcnt;
+ u8 *buffer;
+ size_t bufcnt;
+ size_t buflen;
+ dma_addr_t dma_addr;
+
+ /* walk state */
+ struct scatterlist *sg;
+ unsigned int offset; /* offset in current sg */
+ unsigned int total; /* total request */
+};
+
+struct omap_sham_hmac_ctx {
+ struct crypto_shash *shash;
+ u8 ipad[SHA1_MD5_BLOCK_SIZE];
+ u8 opad[SHA1_MD5_BLOCK_SIZE];
+};
+
+struct omap_sham_ctx {
+ struct omap_sham_dev *dd;
+
+ unsigned long flags;
+
+ /* fallback stuff */
+ struct crypto_shash *fallback;
+
+ struct omap_sham_hmac_ctx base[0];
+};
+
+#define OMAP_SHAM_QUEUE_LENGTH 1
+
+struct omap_sham_dev {
+ struct list_head list;
+ unsigned long phys_base;
+ struct device *dev;
+ void __iomem *io_base;
+ int irq;
+ struct clk *iclk;
+ spinlock_t lock;
+ int dma;
+ int dma_lch;
+ struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
+
+ unsigned long flags;
+ struct crypto_queue queue;
+ struct ahash_request *req;
+};
+
+struct omap_sham_drv {
+ struct list_head dev_list;
+ spinlock_t lock;
+ unsigned long flags;
+};
+
+static struct omap_sham_drv sham = {
+ .dev_list = LIST_HEAD_INIT(sham.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(sham.lock),
+};
+
+static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
+{
+ return __raw_readl(dd->io_base + offset);
+}
+
+static inline void omap_sham_write(struct omap_sham_dev *dd,
+ u32 offset, u32 value)
+{
+ __raw_writel(value, dd->io_base + offset);
+}
+
+static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
+ u32 value, u32 mask)
+{
+ u32 val;
+
+ val = omap_sham_read(dd, address);
+ val &= ~mask;
+ val |= value;
+ omap_sham_write(dd, address, val);
+}
+
+static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
+{
+ unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
+
+ while (!(omap_sham_read(dd, offset) & bit)) {
+ if (time_is_before_jiffies(timeout))
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void omap_sham_copy_hash(struct ahash_request *req, int out)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ u32 *hash = (u32 *)req->result;
+ int i;
+
+ if (likely(ctx->flags & FLAGS_SHA1)) {
+ /* SHA1 results are in big endian */
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
+ if (out)
+ hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
+ SHA_REG_DIGEST(i)));
+ else
+ omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
+ cpu_to_be32(hash[i]));
+ } else {
+ /* MD5 results are in little endian */
+ for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
+ if (out)
+ hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
+ SHA_REG_DIGEST(i)));
+ else
+ omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
+ cpu_to_le32(hash[i]));
+ }
+}
+
+static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
+ int final, int dma)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ u32 val = length << 5, mask;
+
+ if (unlikely(!ctx->digcnt)) {
+
+ clk_enable(dd->iclk);
+
+ if (!(dd->flags & FLAGS_INIT)) {
+ omap_sham_write_mask(dd, SHA_REG_MASK,
+ SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
+
+ if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
+ SHA_REG_SYSSTATUS_RESETDONE))
+ return -ETIMEDOUT;
+
+ dd->flags |= FLAGS_INIT;
+ }
+ } else {
+ omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
+ }
+
+ omap_sham_write_mask(dd, SHA_REG_MASK,
+ SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
+ SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
+ /*
+ * Setting ALGO_CONST only for the first iteration
+ * and CLOSE_HASH only for the last one.
+ */
+ if (ctx->flags & FLAGS_SHA1)
+ val |= SHA_REG_CTRL_ALGO;
+ if (!ctx->digcnt)
+ val |= SHA_REG_CTRL_ALGO_CONST;
+ if (final)
+ val |= SHA_REG_CTRL_CLOSE_HASH;
+
+ mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
+ SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
+
+ omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
+
+ return 0;
+}
+
+static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
+ size_t length, int final)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ int err, count, len32;
+ const u32 *buffer = (const u32 *)buf;
+
+ dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
+ ctx->digcnt, length, final);
+
+ err = omap_sham_write_ctrl(dd, length, final, 0);
+ if (err)
+ return err;
+
+ if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
+ return -ETIMEDOUT;
+
+ ctx->digcnt += length;
+
+ if (final)
+ ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+
+ len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+ for (count = 0; count < len32; count++)
+ omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);
+
+ return -EINPROGRESS;
+}
+
+static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
+ size_t length, int final)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ int err, len32;
+
+ dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
+ ctx->digcnt, length, final);
+
+ /* flush cache entries related to our page */
+ if (dma_addr == ctx->dma_addr)
+ dma_sync_single_for_device(dd->dev, dma_addr, length,
+ DMA_TO_DEVICE);
+
+ len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+ omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
+ 1, OMAP_DMA_SYNC_PACKET, dd->dma, OMAP_DMA_DST_SYNC);
+
+ omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_addr, 0, 0);
+
+ err = omap_sham_write_ctrl(dd, length, final, 1);
+ if (err)
+ return err;
+
+ ctx->digcnt += length;
+
+ if (final)
+ ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+
+ dd->flags |= FLAGS_DMA_ACTIVE;
+
+ omap_start_dma(dd->dma_lch);
+
+ return -EINPROGRESS;
+}
+
+static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
+ const u8 *data, size_t length)
+{
+ size_t count = min(length, ctx->buflen - ctx->bufcnt);
+
+ count = min(count, ctx->total);
+ if (count <= 0)
+ return 0;
+ memcpy(ctx->buffer + ctx->bufcnt, data, count);
+ ctx->bufcnt += count;
+
+ return count;
+}
+
+static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
+{
+ size_t count;
+
+ while (ctx->sg) {
+ count = omap_sham_append_buffer(ctx,
+ sg_virt(ctx->sg) + ctx->offset,
+ ctx->sg->length - ctx->offset);
+ if (!count)
+ break;
+ ctx->offset += count;
+ ctx->total -= count;
+ if (ctx->offset == ctx->sg->length) {
+ ctx->sg = sg_next(ctx->sg);
+ if (ctx->sg)
+ ctx->offset = 0;
+ else
+ ctx->total = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ unsigned int final;
+ size_t count;
+
+ if (!ctx->total)
+ return 0;
+
+ omap_sham_append_sg(ctx);
+
+ final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+
+ dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
+ ctx->bufcnt, ctx->digcnt, final);
+
+ if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
+ count = ctx->bufcnt;
+ ctx->bufcnt = 0;
+ return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
+ }
+
+ return 0;
+}
+
+static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ unsigned int length;
+
+ ctx->flags |= FLAGS_FAST;
+
+ length = min(ctx->total, sg_dma_len(ctx->sg));
+ ctx->total = length;
+
+ if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
+ dev_err(dd->dev, "dma_map_sg error\n");
+ return -EINVAL;
+ }
+
+ ctx->total -= length;
+
+ return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
+}
+
+static int omap_sham_update_cpu(struct omap_sham_dev *dd)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ int bufcnt;
+
+ omap_sham_append_sg(ctx);
+ bufcnt = ctx->bufcnt;
+ ctx->bufcnt = 0;
+
+ return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
+}
+
+static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+
+ omap_stop_dma(dd->dma_lch);
+ if (ctx->flags & FLAGS_FAST)
+ dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+static void omap_sham_cleanup(struct ahash_request *req)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_dev *dd = ctx->dd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ if (ctx->flags & FLAGS_CLEAN) {
+ spin_unlock_irqrestore(&dd->lock, flags);
+ return;
+ }
+ ctx->flags |= FLAGS_CLEAN;
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (ctx->digcnt)
+ clk_disable(dd->iclk);
+
+ if (ctx->dma_addr)
+ dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
+ DMA_TO_DEVICE);
+
+ if (ctx->buffer)
+ free_page((unsigned long)ctx->buffer);
+
+ dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
+}
+
+static int omap_sham_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_dev *dd = NULL, *tmp;
+
+ spin_lock_bh(&sham.lock);
+ if (!tctx->dd) {
+ list_for_each_entry(tmp, &sham.dev_list, list) {
+ dd = tmp;
+ break;
+ }
+ tctx->dd = dd;
+ } else {
+ dd = tctx->dd;
+ }
+ spin_unlock_bh(&sham.lock);
+
+ ctx->dd = dd;
+
+ ctx->flags = 0;
+
+ ctx->flags |= FLAGS_FIRST;
+
+ dev_dbg(dd->dev, "init: digest size: %d\n",
+ crypto_ahash_digestsize(tfm));
+
+ if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
+ ctx->flags |= FLAGS_SHA1;
+
+ ctx->bufcnt = 0;
+ ctx->digcnt = 0;
+
+ ctx->buflen = PAGE_SIZE;
+ ctx->buffer = (void *)__get_free_page(
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!ctx->buffer)
+ return -ENOMEM;
+
+ ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+ dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
+ free_page((unsigned long)ctx->buffer);
+ return -EINVAL;
+ }
+
+ if (tctx->flags & FLAGS_HMAC) {
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+
+ memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
+ ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
+ ctx->flags |= FLAGS_HMAC;
+ }
+
+ return 0;
+
+}
+
+static int omap_sham_update_req(struct omap_sham_dev *dd)
+{
+ struct ahash_request *req = dd->req;
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int err;
+
+ dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
+ ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);
+
+ if (ctx->flags & FLAGS_CPU)
+ err = omap_sham_update_cpu(dd);
+ else if (ctx->flags & FLAGS_FAST)
+ err = omap_sham_update_dma_fast(dd);
+ else
+ err = omap_sham_update_dma_slow(dd);
+
+ /* wait for DMA completion before we can take more data */
+ dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
+
+ return err;
+}
+
+static int omap_sham_final_req(struct omap_sham_dev *dd)
+{
+ struct ahash_request *req = dd->req;
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int err = 0, use_dma = 1;
+
+ if (ctx->bufcnt <= 64)
+ /* faster to handle last block with cpu */
+ use_dma = 0;
+
+ if (use_dma)
+ err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
+ else
+ err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
+
+ ctx->bufcnt = 0;
+
+ if (err != -EINPROGRESS)
+ omap_sham_cleanup(req);
+
+ dev_dbg(dd->dev, "final_req: err: %d\n", err);
+
+ return err;
+}
+
+static int omap_sham_finish_req_hmac(struct ahash_request *req)
+{
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ int bs = crypto_shash_blocksize(bctx->shash);
+ int ds = crypto_shash_digestsize(bctx->shash);
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(bctx->shash)];
+ } desc;
+
+ desc.shash.tfm = bctx->shash;
+ desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
+
+ return crypto_shash_init(&desc.shash) ?:
+ crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
+ crypto_shash_finup(&desc.shash, req->result, ds, req->result);
+}
+
+static void omap_sham_finish_req(struct ahash_request *req, int err)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!err) {
+ omap_sham_copy_hash(ctx->dd->req, 1);
+ if (ctx->flags & FLAGS_HMAC)
+ err = omap_sham_finish_req_hmac(req);
+ }
+
+ if (ctx->flags & FLAGS_FINAL)
+ omap_sham_cleanup(req);
+
+ clear_bit(FLAGS_BUSY, &ctx->dd->flags);
+
+ if (req->base.complete)
+ req->base.complete(&req->base, err);
+}
+
+static int omap_sham_handle_queue(struct omap_sham_dev *dd)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct omap_sham_reqctx *ctx;
+ struct ahash_request *req, *prev_req;
+ unsigned long flags;
+ int err = 0;
+
+ if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
+ return 0;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ backlog = crypto_get_backlog(&dd->queue);
+ async_req = crypto_dequeue_request(&dd->queue);
+ if (!async_req)
+ clear_bit(FLAGS_BUSY, &dd->flags);
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!async_req)
+ return 0;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ahash_request_cast(async_req);
+
+ prev_req = dd->req;
+ dd->req = req;
+
+ ctx = ahash_request_ctx(req);
+
+ dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
+ ctx->op, req->nbytes);
+
+ if (req != prev_req && ctx->digcnt)
+ /* request has changed - restore hash */
+ omap_sham_copy_hash(req, 0);
+
+ if (ctx->op == OP_UPDATE) {
+ err = omap_sham_update_req(dd);
+ if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
+ /* no final() after finup() */
+ err = omap_sham_final_req(dd);
+ } else if (ctx->op == OP_FINAL) {
+ err = omap_sham_final_req(dd);
+ }
+
+ if (err != -EINPROGRESS) {
+ /* done_task will not finish it, so do it here */
+ omap_sham_finish_req(req, err);
+ tasklet_schedule(&dd->queue_task);
+ }
+
+ dev_dbg(dd->dev, "exit, err: %d\n", err);
+
+ return err;
+}
+
+static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct omap_sham_dev *dd = tctx->dd;
+ unsigned long flags;
+ int err;
+
+ ctx->op = op;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ err = ahash_enqueue_request(&dd->queue, req);
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ omap_sham_handle_queue(dd);
+
+ return err;
+}
+
+static int omap_sham_update(struct ahash_request *req)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!req->nbytes)
+ return 0;
+
+ ctx->total = req->nbytes;
+ ctx->sg = req->src;
+ ctx->offset = 0;
+
+ if (ctx->flags & FLAGS_FINUP) {
+ if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
+ /*
+ * OMAP HW accel works only with buffers >= 9 bytes;
+ * will switch to the software fallback in final(),
+ * which sees the same request and buffered data.
+ */
+ omap_sham_append_sg(ctx);
+ return 0;
+ } else if (ctx->bufcnt + ctx->total <= 64) {
+ ctx->flags |= FLAGS_CPU;
+ } else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
+ /* maybe we can use faster functions */
+ int aligned = IS_ALIGNED((u32)ctx->sg->offset,
+ sizeof(u32));
+
+ if (aligned && (ctx->flags & FLAGS_FIRST))
+ /* digest: first and final */
+ ctx->flags |= FLAGS_FAST;
+
+ ctx->flags &= ~FLAGS_FIRST;
+ }
+ } else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
+ /* if not finup -> not fast */
+ omap_sham_append_sg(ctx);
+ return 0;
+ }
+
+ return omap_sham_enqueue(req, OP_UPDATE);
+}
+
+static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(shash)];
+ } desc;
+
+ desc.shash.tfm = shash;
+ desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_shash_digest(&desc.shash, data, len, out);
+}
+
+static int omap_sham_final_shash(struct ahash_request *req)
+{
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+ return omap_sham_shash_digest(tctx->fallback, req->base.flags,
+ ctx->buffer, ctx->bufcnt, req->result);
+}
+
+static int omap_sham_final(struct ahash_request *req)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int err = 0;
+
+ ctx->flags |= FLAGS_FINUP;
+
+ /* OMAP HW accel works only with buffers >= 9 */
+ /* HMAC is always >= 9 because of ipad */
+ if ((ctx->digcnt + ctx->bufcnt) < 9)
+ err = omap_sham_final_shash(req);
+ else if (ctx->bufcnt)
+ return omap_sham_enqueue(req, OP_FINAL);
+
+ omap_sham_cleanup(req);
+
+ return err;
+}
+
+static int omap_sham_finup(struct ahash_request *req)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int err1, err2;
+
+ ctx->flags |= FLAGS_FINUP;
+
+ err1 = omap_sham_update(req);
+ if (err1 == -EINPROGRESS)
+ return err1;
+ /*
+ * final() always has to be called to clean up resources,
+ * even if update() failed, except on -EINPROGRESS
+ */
+ err2 = omap_sham_final(req);
+
+ return err1 ?: err2;
+}
+
+static int omap_sham_digest(struct ahash_request *req)
+{
+ return omap_sham_init(req) ?: omap_sham_finup(req);
+}
+
+static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ int bs = crypto_shash_blocksize(bctx->shash);
+ int ds = crypto_shash_digestsize(bctx->shash);
+ int err, i;
+ err = crypto_shash_setkey(tctx->fallback, key, keylen);
+ if (err)
+ return err;
+
+ if (keylen > bs) {
+ err = omap_sham_shash_digest(bctx->shash,
+ crypto_shash_get_flags(bctx->shash),
+ key, keylen, bctx->ipad);
+ if (err)
+ return err;
+ keylen = ds;
+ } else {
+ memcpy(bctx->ipad, key, keylen);
+ }
+
+ memset(bctx->ipad + keylen, 0, bs - keylen);
+ memcpy(bctx->opad, bctx->ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ bctx->ipad[i] ^= 0x36;
+ bctx->opad[i] ^= 0x5c;
+ }
+
+ return err;
+}
+
+static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
+{
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+
+ /* Allocate a fallback and abort if it failed. */
+ tctx->fallback = crypto_alloc_shash(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(tctx->fallback)) {
+ pr_err("omap-sham: fallback driver '%s' "
+ "could not be loaded.\n", alg_name);
+ return PTR_ERR(tctx->fallback);
+ }
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct omap_sham_reqctx));
+
+ if (alg_base) {
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ tctx->flags |= FLAGS_HMAC;
+ bctx->shash = crypto_alloc_shash(alg_base, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(bctx->shash)) {
+ pr_err("omap-sham: base driver '%s' "
+ "could not be loaded.\n", alg_base);
+ crypto_free_shash(tctx->fallback);
+ return PTR_ERR(bctx->shash);
+ }
+
+ }
+
+ return 0;
+}
+
+static int omap_sham_cra_init(struct crypto_tfm *tfm)
+{
+ return omap_sham_cra_init_alg(tfm, NULL);
+}
+
+static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
+{
+ return omap_sham_cra_init_alg(tfm, "sha1");
+}
+
+static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
+{
+ return omap_sham_cra_init_alg(tfm, "md5");
+}
+
+static void omap_sham_cra_exit(struct crypto_tfm *tfm)
+{
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_shash(tctx->fallback);
+ tctx->fallback = NULL;
+
+ if (tctx->flags & FLAGS_HMAC) {
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ crypto_free_shash(bctx->shash);
+ }
+}
+
+static struct ahash_alg algs[] = {
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "omap-sha1",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "md5",
+ .cra_driver_name = "omap-md5",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .setkey = omap_sham_setkey,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "omap-hmac-sha1",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx) +
+ sizeof(struct omap_sham_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_sha1_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .setkey = omap_sham_setkey,
+ .halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "omap-hmac-md5",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx) +
+ sizeof(struct omap_sham_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_md5_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+}
+};
+
+static void omap_sham_done_task(unsigned long data)
+{
+ struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+ struct ahash_request *req = dd->req;
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int ready = 1;
+
+ if (ctx->flags & FLAGS_OUTPUT_READY) {
+ ctx->flags &= ~FLAGS_OUTPUT_READY;
+ ready = 1;
+ }
+
+ if (dd->flags & FLAGS_DMA_ACTIVE) {
+ dd->flags &= ~FLAGS_DMA_ACTIVE;
+ omap_sham_update_dma_stop(dd);
+ omap_sham_update_dma_slow(dd);
+ }
+
+ if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) {
+ dev_dbg(dd->dev, "update done\n");
+ /* finish current request */
+ omap_sham_finish_req(req, 0);
+ /* start new request */
+ omap_sham_handle_queue(dd);
+ }
+}
+
+static void omap_sham_queue_task(unsigned long data)
+{
+ struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+
+ omap_sham_handle_queue(dd);
+}
+
+static irqreturn_t omap_sham_irq(int irq, void *dev_id)
+{
+ struct omap_sham_dev *dd = dev_id;
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+
+ if (!ctx) {
+ dev_err(dd->dev, "unknown interrupt.\n");
+ return IRQ_HANDLED;
+ }
+
+ if (unlikely(ctx->flags & FLAGS_FINAL))
+ /* final -> allow device to go to power-saving mode */
+ omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
+
+ omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
+ SHA_REG_CTRL_OUTPUT_READY);
+ omap_sham_read(dd, SHA_REG_CTRL);
+
+ ctx->flags |= FLAGS_OUTPUT_READY;
+ tasklet_schedule(&dd->done_task);
+
+ return IRQ_HANDLED;
+}
+
+static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
+{
+ struct omap_sham_dev *dd = data;
+
+ if (likely(lch == dd->dma_lch))
+ tasklet_schedule(&dd->done_task);
+}
+
+static int omap_sham_dma_init(struct omap_sham_dev *dd)
+{
+ int err;
+
+ dd->dma_lch = -1;
+
+ err = omap_request_dma(dd->dma, dev_name(dd->dev),
+ omap_sham_dma_callback, dd, &dd->dma_lch);
+ if (err) {
+ dev_err(dd->dev, "Unable to request DMA channel\n");
+ return err;
+ }
+ omap_set_dma_dest_params(dd->dma_lch, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ dd->phys_base + SHA_REG_DIN(0), 0, 16);
+
+ omap_set_dma_dest_burst_mode(dd->dma_lch,
+ OMAP_DMA_DATA_BURST_16);
+
+ return 0;
+}
+
+static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
+{
+ if (dd->dma_lch >= 0) {
+ omap_free_dma(dd->dma_lch);
+ dd->dma_lch = -1;
+ }
+}
+
+static int __devinit omap_sham_probe(struct platform_device *pdev)
+{
+ struct omap_sham_dev *dd;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int err, i, j;
+
+ dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
+ if (dd == NULL) {
+ dev_err(dev, "unable to alloc data struct.\n");
+ err = -ENOMEM;
+ goto data_err;
+ }
+ dd->dev = dev;
+ platform_set_drvdata(pdev, dd);
+
+ INIT_LIST_HEAD(&dd->list);
+ spin_lock_init(&dd->lock);
+ tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
+ tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd);
+ crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
+
+ dd->irq = -1;
+
+ /* Get the base address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "no MEM resource info\n");
+ err = -ENODEV;
+ goto res_err;
+ }
+ dd->phys_base = res->start;
+
+ /* Get the DMA */
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_err(dev, "no DMA resource info\n");
+ err = -ENODEV;
+ goto res_err;
+ }
+ dd->dma = res->start;
+
+ /* Get the IRQ */
+ dd->irq = platform_get_irq(pdev, 0);
+ if (dd->irq < 0) {
+ dev_err(dev, "no IRQ resource info\n");
+ err = dd->irq;
+ goto res_err;
+ }
+
+ err = request_irq(dd->irq, omap_sham_irq,
+ IRQF_TRIGGER_LOW, dev_name(dev), dd);
+ if (err) {
+ dev_err(dev, "unable to request irq.\n");
+ goto res_err;
+ }
+
+ err = omap_sham_dma_init(dd);
+ if (err)
+ goto dma_err;
+
+ /* Initializing the clock */
+ dd->iclk = clk_get(dev, "ick");
+ if (!dd->iclk) {
+ dev_err(dev, "clock intialization failed.\n");
+ err = -ENODEV;
+ goto clk_err;
+ }
+
+ dd->io_base = ioremap(dd->phys_base, SZ_4K);
+ if (!dd->io_base) {
+ dev_err(dev, "can't ioremap\n");
+ err = -ENOMEM;
+ goto io_err;
+ }
+
+ clk_enable(dd->iclk);
+ dev_info(dev, "hw accel on OMAP rev %u.%u\n",
+ (omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
+ omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
+ clk_disable(dd->iclk);
+
+ spin_lock(&sham.lock);
+ list_add_tail(&dd->list, &sham.dev_list);
+ spin_unlock(&sham.lock);
+
+ for (i = 0; i < ARRAY_SIZE(algs); i++) {
+ err = crypto_register_ahash(&algs[i]);
+ if (err)
+ goto err_algs;
+ }
+
+ return 0;
+
+err_algs:
+ for (j = 0; j < i; j++)
+ crypto_unregister_ahash(&algs[j]);
+ iounmap(dd->io_base);
+io_err:
+ clk_put(dd->iclk);
+clk_err:
+ omap_sham_dma_cleanup(dd);
+dma_err:
+ if (dd->irq >= 0)
+ free_irq(dd->irq, dd);
+res_err:
+ kfree(dd);
+ dd = NULL;
+data_err:
+ dev_err(dev, "initialization failed.\n");
+
+ return err;
+}
+
+static int __devexit omap_sham_remove(struct platform_device *pdev)
+{
+ struct omap_sham_dev *dd;
+ int i;
+
+ dd = platform_get_drvdata(pdev);
+ if (!dd)
+ return -ENODEV;
+ spin_lock(&sham.lock);
+ list_del(&dd->list);
+ spin_unlock(&sham.lock);
+ for (i = 0; i < ARRAY_SIZE(algs); i++)
+ crypto_unregister_ahash(&algs[i]);
+ tasklet_kill(&dd->done_task);
+ tasklet_kill(&dd->queue_task);
+ iounmap(dd->io_base);
+ clk_put(dd->iclk);
+ omap_sham_dma_cleanup(dd);
+ if (dd->irq >= 0)
+ free_irq(dd->irq, dd);
+ kfree(dd);
+ dd = NULL;
+
+ return 0;
+}
+
+static struct platform_driver omap_sham_driver = {
+ .probe = omap_sham_probe,
+ .remove = omap_sham_remove,
+ .driver = {
+ .name = "omap-sham",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init omap_sham_mod_init(void)
+{
+ pr_info("loading %s driver\n", "omap-sham");
+
+ if (!cpu_class_is_omap2() ||
+ omap_type() != OMAP2_DEVICE_TYPE_SEC) {
+ pr_err("Unsupported cpu\n");
+ return -ENODEV;
+ }
+
+ return platform_driver_register(&omap_sham_driver);
+}
+
+static void __exit omap_sham_mod_exit(void)
+{
+ platform_driver_unregister(&omap_sham_driver);
+}
+
+module_init(omap_sham_mod_init);
+module_exit(omap_sham_mod_exit);
+
+MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Dmitry Kasatkin");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index dc558a097311..637c105f53d2 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1,7 +1,7 @@
/*
* talitos - Freescale Integrated Security Engine (SEC) device driver
*
- * Copyright (c) 2008 Freescale Semiconductor, Inc.
+ * Copyright (c) 2008-2010 Freescale Semiconductor, Inc.
*
* Scatterlist Crypto API glue code copied from files with the following:
* Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
@@ -43,9 +43,12 @@
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
+#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include "talitos.h"
@@ -65,6 +68,13 @@ struct talitos_ptr {
__be32 ptr; /* address */
};
+static const struct talitos_ptr zero_entry = {
+ .len = 0,
+ .j_extent = 0,
+ .eptr = 0,
+ .ptr = 0
+};
+
/* descriptor */
struct talitos_desc {
__be32 hdr; /* header high bits */
@@ -146,6 +156,7 @@ struct talitos_private {
/* .features flag */
#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
+#define TALITOS_FTR_SHA224_HWINIT 0x00000004
static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
{
@@ -692,7 +703,7 @@ static void talitos_unregister_rng(struct device *dev)
#define TALITOS_MAX_KEY_SIZE 64
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
-#define MD5_DIGEST_SIZE 16
+#define MD5_BLOCK_SIZE 64
struct talitos_ctx {
struct device *dev;
@@ -705,6 +716,23 @@ struct talitos_ctx {
unsigned int authsize;
};
+#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
+#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
+
+struct talitos_ahash_req_ctx {
+ u64 count;
+ u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
+ unsigned int hw_context_size;
+ u8 buf[HASH_MAX_BLOCK_SIZE];
+ u8 bufnext[HASH_MAX_BLOCK_SIZE];
+ unsigned int swinit;
+ unsigned int first;
+ unsigned int last;
+ unsigned int to_hash_later;
+ struct scatterlist bufsl[2];
+ struct scatterlist *psrc;
+};
+
static int aead_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
@@ -821,10 +849,14 @@ static void talitos_sg_unmap(struct device *dev,
else
dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
- if (edesc->dst_is_chained)
- talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE);
- else
- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ if (dst) {
+ if (edesc->dst_is_chained)
+ talitos_unmap_sg_chain(dev, dst,
+ DMA_FROM_DEVICE);
+ else
+ dma_unmap_sg(dev, dst, dst_nents,
+ DMA_FROM_DEVICE);
+ }
} else
if (edesc->src_is_chained)
talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
@@ -1114,12 +1146,67 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
return sg_nents;
}
+/**
+ * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy to
+ * @buflen: The number of bytes to copy
+ * @skip: The number of bytes to skip before copying.
+ * Note: skip + buflen should equal SG total size.
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen, unsigned int skip)
+{
+ unsigned int offset = 0;
+ unsigned int boffset = 0;
+ struct sg_mapping_iter miter;
+ unsigned long flags;
+ unsigned int sg_flags = SG_MITER_ATOMIC;
+ size_t total_buffer = buflen + skip;
+
+ sg_flags |= SG_MITER_FROM_SG;
+
+ sg_miter_start(&miter, sgl, nents, sg_flags);
+
+ local_irq_save(flags);
+
+ while (sg_miter_next(&miter) && offset < total_buffer) {
+ unsigned int len;
+ unsigned int ignore;
+
+ if ((offset + miter.length) > skip) {
+ if (offset < skip) {
+ /* Copy part of this segment */
+ ignore = skip - offset;
+ len = miter.length - ignore;
+ memcpy(buf + boffset, miter.addr + ignore, len);
+ } else {
+ /* Copy all of this segment */
+ len = miter.length;
+ memcpy(buf + boffset, miter.addr, len);
+ }
+ boffset += len;
+ }
+ offset += miter.length;
+ }
+
+ sg_miter_stop(&miter);
+
+ local_irq_restore(flags);
+ return boffset;
+}
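/*
 * Editorial worked example (not part of this patch): for a 100-byte update
 * with a 64-byte block size and no previously buffered data, 64 bytes are
 * hashed now and the trailing 36 are stashed for the next request, which is
 * exactly what ahash_process_req() does further down:
 *
 *	nents = sg_count(areq->src, 100, &chained);
 *	sg_copy_end_to_buffer(areq->src, nents, req_ctx->bufnext,
 *			      36, 100 - 36);
 *
 * Note that buflen (36) plus skip (64) equals the 100 bytes of S/G payload,
 * as required by the helper's contract above.
 */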
+
/*
* allocate and map the extended descriptor
*/
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
struct scatterlist *src,
struct scatterlist *dst,
+ int hash_result,
unsigned int cryptlen,
unsigned int authsize,
int icv_stashing,
@@ -1139,11 +1226,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
src_nents = sg_count(src, cryptlen + authsize, &src_chained);
src_nents = (src_nents == 1) ? 0 : src_nents;
- if (dst == src) {
- dst_nents = src_nents;
+ if (hash_result) {
+ dst_nents = 0;
} else {
- dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained);
- dst_nents = (dst_nents == 1) ? 0 : dst_nents;
+ if (dst == src) {
+ dst_nents = src_nents;
+ } else {
+ dst_nents = sg_count(dst, cryptlen + authsize,
+ &dst_chained);
+ dst_nents = (dst_nents == 1) ? 0 : dst_nents;
+ }
}
/*
@@ -1172,8 +1264,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
edesc->src_is_chained = src_chained;
edesc->dst_is_chained = dst_chained;
edesc->dma_len = dma_len;
- edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
- edesc->dma_len, DMA_BIDIRECTIONAL);
+ if (dma_len)
+ edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
return edesc;
}
@@ -1184,7 +1278,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
- return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
+ return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
areq->cryptlen, ctx->authsize, icv_stashing,
areq->base.flags);
}
@@ -1441,8 +1535,8 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes,
- 0, 0, areq->base.flags);
+ return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
+ areq->nbytes, 0, 0, areq->base.flags);
}
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
@@ -1478,15 +1572,329 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
}
+static void common_nonsnoop_hash_unmap(struct device *dev,
+ struct talitos_edesc *edesc,
+ struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+
+ /* When using hashctx-in, must unmap it. */
+ if (edesc->desc.ptr[1].len)
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
+ DMA_TO_DEVICE);
+
+ if (edesc->desc.ptr[2].len)
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
+ DMA_TO_DEVICE);
+
+ talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);
+
+ if (edesc->dma_len)
+ dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
+ DMA_BIDIRECTIONAL);
+
+}
+
+static void ahash_done(struct device *dev,
+ struct talitos_desc *desc, void *context,
+ int err)
+{
+ struct ahash_request *areq = context;
+ struct talitos_edesc *edesc =
+ container_of(desc, struct talitos_edesc, desc);
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ if (!req_ctx->last && req_ctx->to_hash_later) {
+ /* Position any partial block for next update/final/finup */
+ memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
+ }
+ common_nonsnoop_hash_unmap(dev, edesc, areq);
+
+ kfree(edesc);
+
+ areq->base.complete(&areq->base, err);
+}
+
+static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ struct ahash_request *areq, unsigned int length,
+ void (*callback) (struct device *dev,
+ struct talitos_desc *desc,
+ void *context, int error))
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct device *dev = ctx->dev;
+ struct talitos_desc *desc = &edesc->desc;
+ int sg_count, ret;
+
+ /* first DWORD empty */
+ desc->ptr[0] = zero_entry;
+
+ /* hash context in */
+ if (!req_ctx->first || req_ctx->swinit) {
+ map_single_talitos_ptr(dev, &desc->ptr[1],
+ req_ctx->hw_context_size,
+ (char *)req_ctx->hw_context, 0,
+ DMA_TO_DEVICE);
+ req_ctx->swinit = 0;
+ } else {
+ desc->ptr[1] = zero_entry;
+ /* Indicate next op is not the first. */
+ req_ctx->first = 0;
+ }
+
+ /* HMAC key */
+ if (ctx->keylen)
+ map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
+ (char *)&ctx->key, 0, DMA_TO_DEVICE);
+ else
+ desc->ptr[2] = zero_entry;
+
+ /*
+ * data in
+ */
+ desc->ptr[3].len = cpu_to_be16(length);
+ desc->ptr[3].j_extent = 0;
+
+ sg_count = talitos_map_sg(dev, req_ctx->psrc,
+ edesc->src_nents ? : 1,
+ DMA_TO_DEVICE,
+ edesc->src_is_chained);
+
+ if (sg_count == 1) {
+ to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
+ } else {
+ sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
+ &edesc->link_tbl[0]);
+ if (sg_count > 1) {
+ desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
+ to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
+ dma_sync_single_for_device(ctx->dev,
+ edesc->dma_link_tbl,
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
+ } else {
+ /* Only one segment now, so no link tbl needed */
+ to_talitos_ptr(&desc->ptr[3],
+ sg_dma_address(req_ctx->psrc));
+ }
+ }
+
+ /* fifth DWORD empty */
+ desc->ptr[4] = zero_entry;
+
+ /* hash/HMAC out -or- hash context out */
+ if (req_ctx->last)
+ map_single_talitos_ptr(dev, &desc->ptr[5],
+ crypto_ahash_digestsize(tfm),
+ areq->result, 0, DMA_FROM_DEVICE);
+ else
+ map_single_talitos_ptr(dev, &desc->ptr[5],
+ req_ctx->hw_context_size,
+ req_ctx->hw_context, 0, DMA_FROM_DEVICE);
+
+ /* last DWORD empty */
+ desc->ptr[6] = zero_entry;
+
+ ret = talitos_submit(dev, desc, callback, areq);
+ if (ret != -EINPROGRESS) {
+ common_nonsnoop_hash_unmap(dev, edesc, areq);
+ kfree(edesc);
+ }
+ return ret;
+}
+
+static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
+ unsigned int nbytes)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1,
+ nbytes, 0, 0, areq->base.flags);
+}
+
+static int ahash_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ /* Initialize the context */
+ req_ctx->count = 0;
+ req_ctx->first = 1; /* first indicates h/w must init its context */
+ req_ctx->swinit = 0; /* assume h/w init of context */
+ req_ctx->hw_context_size =
+ (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+ ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+ : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+
+ return 0;
+}
+
+/*
+ * on h/w without explicit sha224 support, we initialize h/w context
+ * manually with sha224 constants, and tell it to run sha256.
+ */
+static int ahash_init_sha224_swinit(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ ahash_init(areq);
+ req_ctx->swinit = 1; /* prevent h/w from initializing context with sha256 values */
+
+ req_ctx->hw_context[0] = cpu_to_be32(SHA224_H0);
+ req_ctx->hw_context[1] = cpu_to_be32(SHA224_H1);
+ req_ctx->hw_context[2] = cpu_to_be32(SHA224_H2);
+ req_ctx->hw_context[3] = cpu_to_be32(SHA224_H3);
+ req_ctx->hw_context[4] = cpu_to_be32(SHA224_H4);
+ req_ctx->hw_context[5] = cpu_to_be32(SHA224_H5);
+ req_ctx->hw_context[6] = cpu_to_be32(SHA224_H6);
+ req_ctx->hw_context[7] = cpu_to_be32(SHA224_H7);
+
+ /* init 64-bit count */
+ req_ctx->hw_context[8] = 0;
+ req_ctx->hw_context[9] = 0;
+
+ return 0;
+}
+
+static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct talitos_edesc *edesc;
+ unsigned int blocksize =
+ crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ unsigned int nbytes_to_hash;
+ unsigned int to_hash_later;
+ unsigned int index;
+ int chained;
+
+ index = req_ctx->count & (blocksize - 1);
+ req_ctx->count += nbytes;
+
+ if (!req_ctx->last && (index + nbytes) < blocksize) {
+ /* Buffer the partial block */
+ sg_copy_to_buffer(areq->src,
+ sg_count(areq->src, nbytes, &chained),
+ req_ctx->buf + index, nbytes);
+ return 0;
+ }
+
+ if (index) {
+ /* partial block from previous update; chain it in. */
+ sg_init_table(req_ctx->bufsl, (nbytes) ? 2 : 1);
+ sg_set_buf(req_ctx->bufsl, req_ctx->buf, index);
+ if (nbytes)
+ scatterwalk_sg_chain(req_ctx->bufsl, 2,
+ areq->src);
+ req_ctx->psrc = req_ctx->bufsl;
+ } else {
+ req_ctx->psrc = areq->src;
+ }
+ nbytes_to_hash = index + nbytes;
+ if (!req_ctx->last) {
+ to_hash_later = (nbytes_to_hash & (blocksize - 1));
+ if (to_hash_later) {
+ int nents;
+ /* Must copy to_hash_later bytes from the end
+ * to bufnext (a partial block) for later.
+ */
+ nents = sg_count(areq->src, nbytes, &chained);
+ sg_copy_end_to_buffer(areq->src, nents,
+ req_ctx->bufnext,
+ to_hash_later,
+ nbytes - to_hash_later);
+
+ /* Adjust count for what will be hashed now */
+ nbytes_to_hash -= to_hash_later;
+ }
+ req_ctx->to_hash_later = to_hash_later;
+ }
+
+ /* allocate extended descriptor */
+ edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ edesc->desc.hdr = ctx->desc_hdr_template;
+
+ /* On last one, request SEC to pad; otherwise continue */
+ if (req_ctx->last)
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
+ else
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
+
+ /* request SEC to INIT hash. */
+ if (req_ctx->first && !req_ctx->swinit)
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
+
+ /* When the tfm context has a keylen, it's an HMAC.
+ * A first or last (ie. not middle) descriptor must request HMAC.
+ */
+ if (ctx->keylen && (req_ctx->first || req_ctx->last))
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
+
+ return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
+ ahash_done);
+}
+
+static int ahash_update(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ req_ctx->last = 0;
+
+ return ahash_process_req(areq, areq->nbytes);
+}
+
+static int ahash_final(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ req_ctx->last = 1;
+
+ return ahash_process_req(areq, 0);
+}
+
+static int ahash_finup(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ req_ctx->last = 1;
+
+ return ahash_process_req(areq, areq->nbytes);
+}
+
+static int ahash_digest(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+
+ ahash->init(areq);
+ req_ctx->last = 1;
+
+ return ahash_process_req(areq, areq->nbytes);
+}
+
struct talitos_alg_template {
- struct crypto_alg alg;
+ u32 type;
+ union {
+ struct crypto_alg crypto;
+ struct ahash_alg hash;
+ } alg;
__be32 desc_hdr_template;
};
static struct talitos_alg_template driver_algs[] = {
/* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1511,8 +1919,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1538,8 +1946,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1564,8 +1972,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1591,8 +1999,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1617,8 +2025,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1645,8 +2053,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
/* ABLKCIPHER algorithms. */
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1667,8 +2075,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1689,14 +2097,140 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC |
DESC_HDR_MODE0_DEU_3DES,
- }
+ },
+ /* AHASH algorithms. */
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-talitos",
+ .cra_blocksize = MD5_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_MD5,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-talitos",
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA1,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-talitos",
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA224,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-talitos",
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA256,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "sha384-talitos",
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUB |
+ DESC_HDR_MODE0_MDEUB_SHA384,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-talitos",
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUB |
+ DESC_HDR_MODE0_MDEUB_SHA512,
+ },
};
struct talitos_crypto_alg {
struct list_head entry;
struct device *dev;
- __be32 desc_hdr_template;
- struct crypto_alg crypto_alg;
+ struct talitos_alg_template algt;
};
static int talitos_cra_init(struct crypto_tfm *tfm)
@@ -1705,13 +2239,28 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
struct talitos_crypto_alg *talitos_alg;
struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
- talitos_alg = container_of(alg, struct talitos_crypto_alg, crypto_alg);
+ if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+ talitos_alg = container_of(__crypto_ahash_alg(alg),
+ struct talitos_crypto_alg,
+ algt.alg.hash);
+ else
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.crypto);
/* update context with ptr to dev */
ctx->dev = talitos_alg->dev;
/* copy descriptor header template value */
- ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
+ ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
+
+ return 0;
+}
+
+static int talitos_cra_init_aead(struct crypto_tfm *tfm)
+{
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ talitos_cra_init(tfm);
/* random first IV */
get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
@@ -1719,6 +2268,19 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
+{
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ talitos_cra_init(tfm);
+
+ ctx->keylen = 0;
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct talitos_ahash_req_ctx));
+
+ return 0;
+}
+
/*
* given the alg's descriptor header template, determine whether descriptor
* type and primary/secondary execution units required match the hw
@@ -1747,7 +2309,15 @@ static int talitos_remove(struct of_device *ofdev)
int i;
list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
- crypto_unregister_alg(&t_alg->crypto_alg);
+ switch (t_alg->algt.type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ case CRYPTO_ALG_TYPE_AEAD:
+ crypto_unregister_alg(&t_alg->algt.alg.crypto);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_unregister_ahash(&t_alg->algt.alg.hash);
+ break;
+ }
list_del(&t_alg->entry);
kfree(t_alg);
}
@@ -1781,6 +2351,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
struct talitos_alg_template
*template)
{
+ struct talitos_private *priv = dev_get_drvdata(dev);
struct talitos_crypto_alg *t_alg;
struct crypto_alg *alg;
@@ -1788,16 +2359,36 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
if (!t_alg)
return ERR_PTR(-ENOMEM);
- alg = &t_alg->crypto_alg;
- *alg = template->alg;
+ t_alg->algt = *template;
+
+ switch (t_alg->algt.type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ alg = &t_alg->algt.alg.crypto;
+ alg->cra_init = talitos_cra_init;
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ alg = &t_alg->algt.alg.crypto;
+ alg->cra_init = talitos_cra_init_aead;
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ alg = &t_alg->algt.alg.hash.halg.base;
+ alg->cra_init = talitos_cra_init_ahash;
+ if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
+ !strcmp(alg->cra_name, "sha224")) {
+ t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
+ t_alg->algt.desc_hdr_template =
+ DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA256;
+ }
+ break;
+ }
alg->cra_module = THIS_MODULE;
- alg->cra_init = talitos_cra_init;
alg->cra_priority = TALITOS_CRA_PRIORITY;
alg->cra_alignmask = 0;
alg->cra_ctxsize = sizeof(struct talitos_ctx);
- t_alg->desc_hdr_template = template->desc_hdr_template;
t_alg->dev = dev;
return t_alg;
@@ -1807,7 +2398,7 @@ static int talitos_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
struct device *dev = &ofdev->dev;
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct talitos_private *priv;
const unsigned int *prop;
int i, err;
@@ -1877,7 +2468,8 @@ static int talitos_probe(struct of_device *ofdev,
priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
if (of_device_is_compatible(np, "fsl,sec2.1"))
- priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
+ priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
+ TALITOS_FTR_SHA224_HWINIT;
priv->chan = kzalloc(sizeof(struct talitos_channel) *
priv->num_channels, GFP_KERNEL);
@@ -1931,6 +2523,7 @@ static int talitos_probe(struct of_device *ofdev,
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
struct talitos_crypto_alg *t_alg;
+ char *name = NULL;
t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
@@ -1938,15 +2531,27 @@ static int talitos_probe(struct of_device *ofdev,
goto err_out;
}
- err = crypto_register_alg(&t_alg->crypto_alg);
+ switch (t_alg->algt.type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ case CRYPTO_ALG_TYPE_AEAD:
+ err = crypto_register_alg(
+ &t_alg->algt.alg.crypto);
+ name = t_alg->algt.alg.crypto.cra_driver_name;
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ err = crypto_register_ahash(
+ &t_alg->algt.alg.hash);
+ name =
+ t_alg->algt.alg.hash.halg.base.cra_driver_name;
+ break;
+ }
if (err) {
dev_err(dev, "%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
+ name);
kfree(t_alg);
} else {
list_add_tail(&t_alg->entry, &priv->alg_list);
- dev_info(dev, "%s\n",
- t_alg->crypto_alg.cra_driver_name);
+ dev_info(dev, "%s\n", name);
}
}
}
@@ -1968,8 +2573,11 @@ static const struct of_device_id talitos_match[] = {
MODULE_DEVICE_TABLE(of, talitos_match);
static struct of_platform_driver talitos_driver = {
- .name = "talitos",
- .match_table = talitos_match,
+ .driver = {
+ .name = "talitos",
+ .owner = THIS_MODULE,
+ .of_match_table = talitos_match,
+ },
.probe = talitos_probe,
.remove = talitos_remove,
};
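
Taken together, the talitos.c hunks above replace the plain crypto_alg array with a tagged template: each driver_algs[] entry now carries a .type discriminator next to a union holding either a crypto_alg or an ahash_alg, and talitos_cra_init(), talitos_remove() and talitos_probe() all switch on that tag before touching the union. A minimal, self-contained sketch of the same tagged-union dispatch pattern, with simplified stand-in names that are not part of the driver or the crypto API, might look like this:

/* Sketch only: stand-ins for the talitos template dispatch, not kernel code. */
#include <stdio.h>

enum alg_type { ALG_CIPHER, ALG_AEAD, ALG_AHASH };

struct cipher_alg { const char *name; };	/* stand-in for struct crypto_alg */
struct hash_alg   { const char *name; };	/* stand-in for struct ahash_alg */

struct alg_template {
	enum alg_type type;			/* discriminates the union below */
	union {
		struct cipher_alg crypto;	/* ABLKCIPHER and AEAD entries */
		struct hash_alg hash;		/* AHASH entries */
	} alg;
	unsigned int desc_hdr_template;
};

/* hypothetical helpers standing in for crypto_register_alg()/_ahash() */
static int register_cipher(struct cipher_alg *a) { printf("cipher: %s\n", a->name); return 0; }
static int register_hash(struct hash_alg *a)     { printf("hash:   %s\n", a->name); return 0; }

static int register_template(struct alg_template *t)
{
	switch (t->type) {
	case ALG_CIPHER:
	case ALG_AEAD:
		return register_cipher(&t->alg.crypto);
	case ALG_AHASH:
		return register_hash(&t->alg.hash);
	}
	return -1;
}

int main(void)
{
	struct alg_template algs[] = {
		{ .type = ALG_AEAD,  .alg.crypto = { "authenc(hmac(sha256),cbc(aes))" } },
		{ .type = ALG_AHASH, .alg.hash   = { "sha256" } },
	};
	size_t i;

	for (i = 0; i < sizeof(algs) / sizeof(algs[0]); i++)
		register_template(&algs[i]);
	return 0;
}

The point of the pattern is that one registration loop can serve ciphers, AEADs and hashes without duplicating the table walk.
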
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index ff5a1450e145..0b746aca4587 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -1,7 +1,7 @@
/*
* Freescale SEC (talitos) device register and descriptor header defines
*
- * Copyright (c) 2006-2008 Freescale Semiconductor, Inc.
+ * Copyright (c) 2006-2010 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -130,6 +130,9 @@
#define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/
#define TALITOS_CRCUISR_LO 0xf034
+#define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 0x28
+#define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 0x48
+
/*
* talitos descriptor header (hdr) bits
*/
@@ -157,12 +160,16 @@
#define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000)
#define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000)
+#define DESC_HDR_MODE0_MDEU_CONT cpu_to_be32(0x08000000)
#define DESC_HDR_MODE0_MDEU_INIT cpu_to_be32(0x01000000)
#define DESC_HDR_MODE0_MDEU_HMAC cpu_to_be32(0x00800000)
#define DESC_HDR_MODE0_MDEU_PAD cpu_to_be32(0x00400000)
+#define DESC_HDR_MODE0_MDEU_SHA224 cpu_to_be32(0x00300000)
#define DESC_HDR_MODE0_MDEU_MD5 cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_MDEU_SHA256 cpu_to_be32(0x00100000)
#define DESC_HDR_MODE0_MDEU_SHA1 cpu_to_be32(0x00000000)
+#define DESC_HDR_MODE0_MDEUB_SHA384 cpu_to_be32(0x00000000)
+#define DESC_HDR_MODE0_MDEUB_SHA512 cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \
DESC_HDR_MODE0_MDEU_HMAC)
#define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \
@@ -181,9 +188,12 @@
#define DESC_HDR_MODE1_MDEU_INIT cpu_to_be32(0x00001000)
#define DESC_HDR_MODE1_MDEU_HMAC cpu_to_be32(0x00000800)
#define DESC_HDR_MODE1_MDEU_PAD cpu_to_be32(0x00000400)
+#define DESC_HDR_MODE1_MDEU_SHA224 cpu_to_be32(0x00000300)
#define DESC_HDR_MODE1_MDEU_MD5 cpu_to_be32(0x00000200)
#define DESC_HDR_MODE1_MDEU_SHA256 cpu_to_be32(0x00000100)
#define DESC_HDR_MODE1_MDEU_SHA1 cpu_to_be32(0x00000000)
+#define DESC_HDR_MODE1_MDEUB_SHA384 cpu_to_be32(0x00000000)
+#define DESC_HDR_MODE1_MDEUB_SHA512 cpu_to_be32(0x00000200)
#define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \
DESC_HDR_MODE1_MDEU_HMAC)
#define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \
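
The new DESC_HDR_MODE*_MDEU* bits above (SHA224, plus SHA384/SHA512 on MDEU-B) are what the desc_hdr_template fields added to driver_algs[] earlier in this patch are built from: a template simply ORs a descriptor type, an execution-unit select and a mode. A standalone illustration with placeholder bit values (these are invented, not the real SEC encodings):

/* Illustrative only: placeholder constants, not the SEC register encodings. */
#include <stdint.h>
#include <stdio.h>

#define HDR_TYPE_NONSNOOP_NO_AFEU	0x00000010u	/* hypothetical value */
#define HDR_SEL0_MDEUA			0x30000000u	/* hypothetical value */
#define HDR_MODE0_MDEU_SHA256		0x00100000u	/* same numeric value as the
							 * define above, minus cpu_to_be32() */

int main(void)
{
	/* per-algorithm template, composed the same way as the sha256
	 * driver_algs[] entry: type | EU select | mode */
	uint32_t desc_hdr_template = HDR_TYPE_NONSNOOP_NO_AFEU |
				     HDR_SEL0_MDEUA |
				     HDR_MODE0_MDEU_SHA256;

	printf("template = 0x%08x\n", desc_hdr_template);
	return 0;
}
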
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c27f80e5d531..9e01e96fee94 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -141,6 +141,13 @@ config COH901318
help
Enable support for ST-Ericsson COH 901 318 DMA.
+config STE_DMA40
+ bool "ST-Ericsson DMA40 support"
+ depends on ARCH_U8500
+ select DMA_ENGINE
+ help
+ Support for ST-Ericsson DMA40 controller
+
config AMCC_PPC440SPE_ADMA
tristate "AMCC PPC440SPe ADMA support"
depends on 440SPe || 440SP
@@ -149,9 +156,25 @@ config AMCC_PPC440SPE_ADMA
help
Enable support for the AMCC PPC440SPe RAID engines.
+config TIMB_DMA
+ tristate "Timberdale FPGA DMA support"
+ depends on MFD_TIMBERDALE || HAS_IOMEM
+ select DMA_ENGINE
+ help
+ Enable support for the Timberdale FPGA DMA engine.
+
config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
bool
+config PL330_DMA
+ tristate "DMA API Driver for PL330"
+ select DMA_ENGINE
+ depends on PL330
+ help
+ Select if your platform has one or more PL330 DMACs.
+ You need to provide platform-specific settings via
+ platform_data for a dma-pl330 device.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 22bba3d5e2b6..0fe5ebbfda5d 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -20,3 +20,6 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
+obj-$(CONFIG_PL330_DMA) += pl330.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 308ab320e20b..e88076022a7a 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -760,13 +760,18 @@ err_desc_get:
return NULL;
}
-static void atc_terminate_all(struct dma_chan *chan)
+static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
struct at_desc *desc, *_desc;
LIST_HEAD(list);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
/*
* This is only called when something went wrong elsewhere, so
* we don't really care about the data. Just disable the
@@ -790,32 +795,30 @@ static void atc_terminate_all(struct dma_chan *chan)
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
atc_chain_complete(atchan, desc);
+
+ return 0;
}
/**
- * atc_is_tx_complete - poll for transaction completion
+ * atc_tx_status - poll for transaction completion
* @chan: DMA channel
* @cookie: transaction identifier to check status of
- * @done: if not %NULL, updated with last completed transaction
- * @used: if not %NULL, updated with last used transaction
+ * @txstate: if not %NULL updated with transaction state
*
- * If @done and @used are passed in, upon return they reflect the driver
+ * If @txstate is passed in, upon return it reflects the driver
* internal state and can be used with dma_async_is_complete() to check
* the status of multiple cookies without re-checking hardware state.
*/
static enum dma_status
-atc_is_tx_complete(struct dma_chan *chan,
+atc_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
dma_cookie_t last_used;
dma_cookie_t last_complete;
enum dma_status ret;
- dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
- cookie, done ? *done : 0, used ? *used : 0);
-
spin_lock_bh(&atchan->lock);
last_complete = atchan->completed_cookie;
@@ -833,10 +836,10 @@ atc_is_tx_complete(struct dma_chan *chan,
spin_unlock_bh(&atchan->lock);
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+ dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
+ cookie, last_complete ? last_complete : 0,
+ last_used ? last_used : 0);
return ret;
}
@@ -1082,7 +1085,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
/* set base routines */
atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
- atdma->dma_common.device_is_tx_complete = atc_is_tx_complete;
+ atdma->dma_common.device_tx_status = atc_tx_status;
atdma->dma_common.device_issue_pending = atc_issue_pending;
atdma->dma_common.dev = &pdev->dev;
@@ -1092,7 +1095,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
- atdma->dma_common.device_terminate_all = atc_terminate_all;
+ atdma->dma_common.device_control = atc_control;
}
dma_writel(atdma, EN, AT_DMA_ENABLE);
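
The at_hdmac hunks show the recipe every dmaengine driver in this series follows: device_terminate_all() becomes a cmd switch inside device_control(), and device_is_tx_complete() becomes device_tx_status() reporting through a dma_tx_state instead of separate done/used pointers. A self-contained sketch of that reporting path, using local mirrors of the dmaengine types of this era rather than the kernel headers:

/* Sketch: local stand-ins mirroring the dmaengine reporting types. */
#include <stdio.h>

typedef int dma_cookie_t;

struct dma_tx_state {
	dma_cookie_t last;	/* last completed cookie */
	dma_cookie_t used;	/* last issued cookie */
	unsigned int residue;	/* bytes left on the current transfer */
};

/* mirrors dma_set_tx_state(): tolerates a NULL state so callers that only
 * want the enum result can pass NULL, as ioat_dma_self_test() does below
 * with device_tx_status(dma_chan, cookie, NULL) */
static void set_tx_state(struct dma_tx_state *st, dma_cookie_t last,
			 dma_cookie_t used, unsigned int residue)
{
	if (st) {
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}

int main(void)
{
	struct dma_tx_state st;

	set_tx_state(&st, 41, 42, 512);
	printf("complete=%d used=%d residue=%u\n", st.last, st.used, st.residue);
	return 0;
}
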
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 1656fdcdb6c2..a724e6be1b4d 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -37,7 +37,7 @@ struct coh901318_desc {
struct list_head node;
struct scatterlist *sg;
unsigned int sg_len;
- struct coh901318_lli *data;
+ struct coh901318_lli *lli;
enum dma_data_direction dir;
unsigned long flags;
};
@@ -283,7 +283,7 @@ static int coh901318_start(struct coh901318_chan *cohc)
}
static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
- struct coh901318_lli *data)
+ struct coh901318_lli *lli)
{
int channel = cohc->id;
void __iomem *virtbase = cohc->base->virtbase;
@@ -292,18 +292,18 @@ static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
COH901318_CX_STAT_SPACING*channel) &
COH901318_CX_STAT_ACTIVE);
- writel(data->src_addr,
+ writel(lli->src_addr,
virtbase + COH901318_CX_SRC_ADDR +
COH901318_CX_SRC_ADDR_SPACING * channel);
- writel(data->dst_addr, virtbase +
+ writel(lli->dst_addr, virtbase +
COH901318_CX_DST_ADDR +
COH901318_CX_DST_ADDR_SPACING * channel);
- writel(data->link_addr, virtbase + COH901318_CX_LNK_ADDR +
+ writel(lli->link_addr, virtbase + COH901318_CX_LNK_ADDR +
COH901318_CX_LNK_ADDR_SPACING * channel);
- writel(data->control, virtbase + COH901318_CX_CTRL +
+ writel(lli->control, virtbase + COH901318_CX_CTRL +
COH901318_CX_CTRL_SPACING * channel);
return 0;
@@ -408,33 +408,107 @@ coh901318_first_queued(struct coh901318_chan *cohc)
return d;
}
+static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli)
+{
+ struct coh901318_lli *lli = in_lli;
+ u32 bytes = 0;
+
+ while (lli) {
+ bytes += lli->control & COH901318_CX_CTRL_TC_VALUE_MASK;
+ lli = lli->virt_link_addr;
+ }
+ return bytes;
+}
+
/*
- * DMA start/stop controls
+ * Get the number of bytes left to transfer on this channel.
+ * It is unwise to call this before stopping the channel if an
+ * exact figure is needed, but it can still be called for a
+ * rough estimate.
*/
-u32 coh901318_get_bytes_left(struct dma_chan *chan)
+static u32 coh901318_get_bytes_left(struct dma_chan *chan)
{
- unsigned long flags;
- u32 ret;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ struct coh901318_desc *cohd;
+ struct list_head *pos;
+ unsigned long flags;
+ u32 left = 0;
+ int i = 0;
spin_lock_irqsave(&cohc->lock, flags);
- /* Read transfer count value */
- ret = readl(cohc->base->virtbase +
- COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
- cohc->id) & COH901318_CX_CTRL_TC_VALUE_MASK;
+ /*
+ * If there are many queued jobs, we iterate and add the
+ * size of them all. We take a special look at the first
+ * job, though, since it is probably active.
+ */
+ list_for_each(pos, &cohc->active) {
+ /*
+ * The first job in the list will be working on the
+ * hardware. The job can be stopped but still active,
+ * so that the transfer counter is somewhere inside
+ * the buffer.
+ */
+ cohd = list_entry(pos, struct coh901318_desc, node);
+
+ if (i == 0) {
+ struct coh901318_lli *lli;
+ dma_addr_t ladd;
+
+ /* Read current transfer count value */
+ left = readl(cohc->base->virtbase +
+ COH901318_CX_CTRL +
+ COH901318_CX_CTRL_SPACING * cohc->id) &
+ COH901318_CX_CTRL_TC_VALUE_MASK;
+
+ /* See if the transfer is linked... */
+ ladd = readl(cohc->base->virtbase +
+ COH901318_CX_LNK_ADDR +
+ COH901318_CX_LNK_ADDR_SPACING *
+ cohc->id) &
+ ~COH901318_CX_LNK_LINK_IMMEDIATE;
+ /* Single transaction */
+ if (!ladd)
+ continue;
+
+ /*
+ * Linked transaction, follow the lli, find the
+ * currently processing lli, and proceed to the next
+ */
+ lli = cohd->lli;
+ while (lli && lli->link_addr != ladd)
+ lli = lli->virt_link_addr;
+
+ if (lli)
+ lli = lli->virt_link_addr;
+
+ /*
+ * Follow remaining lli links around to count the total
+ * number of bytes left
+ */
+ left += coh901318_get_bytes_in_lli(lli);
+ } else {
+ left += coh901318_get_bytes_in_lli(cohd->lli);
+ }
+ i++;
+ }
+
+ /* Also count bytes in the queued jobs */
+ list_for_each(pos, &cohc->queue) {
+ cohd = list_entry(pos, struct coh901318_desc, node);
+ left += coh901318_get_bytes_in_lli(cohd->lli);
+ }
spin_unlock_irqrestore(&cohc->lock, flags);
- return ret;
+ return left;
}
-EXPORT_SYMBOL(coh901318_get_bytes_left);
-
-/* Stops a transfer without losing data. Enables power save.
- Use this function in conjunction with coh901318_continue(..)
-*/
-void coh901318_stop(struct dma_chan *chan)
+/*
+ * Pauses a transfer without losing data. Enables power save.
+ * Use this function in conjunction with coh901318_resume.
+ */
+static void coh901318_pause(struct dma_chan *chan)
{
u32 val;
unsigned long flags;
@@ -475,12 +549,11 @@ void coh901318_stop(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
}
-EXPORT_SYMBOL(coh901318_stop);
-/* Continues a transfer that has been stopped via 300_dma_stop(..).
+/* Resumes a transfer that has been paused via coh901318_pause().
Power save is handled.
*/
-void coh901318_continue(struct dma_chan *chan)
+static void coh901318_resume(struct dma_chan *chan)
{
u32 val;
unsigned long flags;
@@ -506,7 +579,6 @@ void coh901318_continue(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
}
-EXPORT_SYMBOL(coh901318_continue);
bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
{
@@ -565,29 +637,30 @@ static int coh901318_config(struct coh901318_chan *cohc,
*/
static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
{
- struct coh901318_desc *cohd_que;
+ struct coh901318_desc *cohd;
- /* start queued jobs, if any
+ /*
+ * start queued jobs, if any
* TODO: transmit all queued jobs in one go
*/
- cohd_que = coh901318_first_queued(cohc);
+ cohd = coh901318_first_queued(cohc);
- if (cohd_que != NULL) {
+ if (cohd != NULL) {
/* Remove from queue */
- coh901318_desc_remove(cohd_que);
+ coh901318_desc_remove(cohd);
/* initiate DMA job */
cohc->busy = 1;
- coh901318_desc_submit(cohc, cohd_que);
+ coh901318_desc_submit(cohc, cohd);
- coh901318_prep_linked_list(cohc, cohd_que->data);
+ coh901318_prep_linked_list(cohc, cohd->lli);
- /* start dma job */
+ /* start dma job on this channel */
coh901318_start(cohc);
}
- return cohd_que;
+ return cohd;
}
/*
@@ -622,7 +695,7 @@ static void dma_tasklet(unsigned long data)
cohc->completed = cohd_fin->desc.cookie;
/* release the lli allocation and remove the descriptor */
- coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
+ coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);
/* return desc to free-list */
coh901318_desc_remove(cohd_fin);
@@ -666,23 +739,44 @@ static void dma_tasklet(unsigned long data)
/* called from interrupt context */
static void dma_tc_handle(struct coh901318_chan *cohc)
{
- BUG_ON(!cohc->allocated && (list_empty(&cohc->active) ||
- list_empty(&cohc->queue)));
-
- if (!cohc->allocated)
+ /*
+ * If the channel is not allocated, then we shouldn't have
+ * any TC interrupts on it.
+ */
+ if (!cohc->allocated) {
+ dev_err(COHC_2_DEV(cohc), "spurious interrupt from "
+ "unallocated channel\n");
return;
+ }
spin_lock(&cohc->lock);
+ /*
+ * When we reach this point, at least one queue item
+ * should have been moved over from cohc->queue to
+ * cohc->active and run to completion; that is why we're
+ * getting a terminal count interrupt, is it not?
+ * If you get this BUG() the most probable cause is that
+ * the individual nodes in the lli chain have IRQ enabled,
+ * so check your platform config for lli chain ctrl.
+ */
+ BUG_ON(list_empty(&cohc->active));
+
cohc->nbr_active_done++;
+ /*
+ * This attempts to take a job from cohc->queue, put it
+ * into cohc->active and start it.
+ */
if (coh901318_queue_start(cohc) == NULL)
cohc->busy = 0;
- BUG_ON(list_empty(&cohc->active));
-
spin_unlock(&cohc->lock);
+ /*
+ * This tasklet will remove items from cohc->active
+ * and thus terminate them.
+ */
if (cohc_chan_conf(cohc)->priority_high)
tasklet_hi_schedule(&cohc->tasklet);
else
@@ -809,6 +903,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
static int coh901318_alloc_chan_resources(struct dma_chan *chan)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ unsigned long flags;
dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n",
__func__, cohc->id);
@@ -816,11 +911,15 @@ static int coh901318_alloc_chan_resources(struct dma_chan *chan)
if (chan->client_count > 1)
return -EBUSY;
+ spin_lock_irqsave(&cohc->lock, flags);
+
coh901318_config(cohc, NULL);
cohc->allocated = 1;
cohc->completed = chan->cookie = 1;
+ spin_unlock_irqrestore(&cohc->lock, flags);
+
return 1;
}
@@ -843,7 +942,7 @@ coh901318_free_chan_resources(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
- chan->device->device_terminate_all(chan);
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
}
@@ -870,7 +969,7 @@ static struct dma_async_tx_descriptor *
coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t size, unsigned long flags)
{
- struct coh901318_lli *data;
+ struct coh901318_lli *lli;
struct coh901318_desc *cohd;
unsigned long flg;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
@@ -892,23 +991,23 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size)
lli_len++;
- data = coh901318_lli_alloc(&cohc->base->pool, lli_len);
+ lli = coh901318_lli_alloc(&cohc->base->pool, lli_len);
- if (data == NULL)
+ if (lli == NULL)
goto err;
ret = coh901318_lli_fill_memcpy(
- &cohc->base->pool, data, src, size, dest,
+ &cohc->base->pool, lli, src, size, dest,
cohc_chan_param(cohc)->ctrl_lli_chained,
ctrl_last);
if (ret)
goto err;
- COH_DBG(coh901318_list_print(cohc, data));
+ COH_DBG(coh901318_list_print(cohc, lli));
/* Pick a descriptor to handle this transfer */
cohd = coh901318_desc_get(cohc);
- cohd->data = data;
+ cohd->lli = lli;
cohd->flags = flags;
cohd->desc.tx_submit = coh901318_tx_submit;
@@ -926,7 +1025,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned long flags)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
- struct coh901318_lli *data;
+ struct coh901318_lli *lli;
struct coh901318_desc *cohd;
const struct coh901318_params *params;
struct scatterlist *sg;
@@ -999,13 +1098,13 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
pr_debug("Allocate %d lli:s for this transfer\n", len);
- data = coh901318_lli_alloc(&cohc->base->pool, len);
+ lli = coh901318_lli_alloc(&cohc->base->pool, len);
- if (data == NULL)
+ if (lli == NULL)
goto err_dma_alloc;
- /* initiate allocated data list */
- ret = coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
+ /* initiate allocated lli list */
+ ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
cohc_dev_addr(cohc),
ctrl_chained,
ctrl,
@@ -1014,14 +1113,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (ret)
goto err_lli_fill;
- COH_DBG(coh901318_list_print(cohc, data));
+ COH_DBG(coh901318_list_print(cohc, lli));
/* Pick a descriptor to handle this transfer */
cohd = coh901318_desc_get(cohc);
cohd->dir = direction;
cohd->flags = flags;
cohd->desc.tx_submit = coh901318_tx_submit;
- cohd->data = data;
+ cohd->lli = lli;
spin_unlock_irqrestore(&cohc->lock, flg);
@@ -1035,9 +1134,8 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
static enum dma_status
-coh901318_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *done,
- dma_cookie_t *used)
+coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
dma_cookie_t last_used;
@@ -1049,10 +1147,10 @@ coh901318_is_tx_complete(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used,
+ coh901318_get_bytes_left(chan));
+ if (ret == DMA_IN_PROGRESS && cohc->stopped)
+ ret = DMA_PAUSED;
return ret;
}
@@ -1065,23 +1163,42 @@ coh901318_issue_pending(struct dma_chan *chan)
spin_lock_irqsave(&cohc->lock, flags);
- /* Busy means that pending jobs are already being processed */
+ /*
+ * Busy means that pending jobs are already being processed,
+ * so there is no point in starting the queue: the
+ * terminal count interrupt on the channel will take the next
+ * job on the queue and execute it anyway.
+ */
if (!cohc->busy)
coh901318_queue_start(cohc);
spin_unlock_irqrestore(&cohc->lock, flags);
}
-static void
-coh901318_terminate_all(struct dma_chan *chan)
+static int
+coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
unsigned long flags;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
struct coh901318_desc *cohd;
void __iomem *virtbase = cohc->base->virtbase;
- coh901318_stop(chan);
+ if (cmd == DMA_PAUSE) {
+ coh901318_pause(chan);
+ return 0;
+ }
+
+ if (cmd == DMA_RESUME) {
+ coh901318_resume(chan);
+ return 0;
+ }
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ /* The remainder of this function terminates the transfer */
+ coh901318_pause(chan);
spin_lock_irqsave(&cohc->lock, flags);
/* Clear any pending BE or TC interrupt */
@@ -1099,7 +1216,7 @@ coh901318_terminate_all(struct dma_chan *chan)
while ((cohd = coh901318_first_active_get(cohc))) {
/* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd->data);
+ coh901318_lli_free(&cohc->base->pool, &cohd->lli);
/* return desc to free-list */
coh901318_desc_remove(cohd);
@@ -1108,7 +1225,7 @@ coh901318_terminate_all(struct dma_chan *chan)
while ((cohd = coh901318_first_queued(cohc))) {
/* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd->data);
+ coh901318_lli_free(&cohc->base->pool, &cohd->lli);
/* return desc to free-list */
coh901318_desc_remove(cohd);
@@ -1120,6 +1237,8 @@ coh901318_terminate_all(struct dma_chan *chan)
cohc->busy = 0;
spin_unlock_irqrestore(&cohc->lock, flags);
+
+ return 0;
}
void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
struct coh901318_base *base)
@@ -1235,9 +1354,9 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources;
base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources;
base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
- base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete;
+ base->dma_slave.device_tx_status = coh901318_tx_status;
base->dma_slave.device_issue_pending = coh901318_issue_pending;
- base->dma_slave.device_terminate_all = coh901318_terminate_all;
+ base->dma_slave.device_control = coh901318_control;
base->dma_slave.dev = &pdev->dev;
err = dma_async_device_register(&base->dma_slave);
@@ -1255,9 +1374,9 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources;
base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources;
base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
- base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete;
+ base->dma_memcpy.device_tx_status = coh901318_tx_status;
base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
- base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
+ base->dma_memcpy.device_control = coh901318_control;
base->dma_memcpy.dev = &pdev->dev;
/*
* This controller can only access address at even 32bit boundaries,
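
The coh901318_get_bytes_left() rework above is what makes the residue argument of dma_set_tx_state() meaningful for this driver: it walks every lli in the active and queued descriptors and sums the transfer-count field of each. A stripped-down model of that walk (the node layout and mask are simplified stand-ins, not the coh901318 lli):

/* Sketch: a simplified linked-item walk like coh901318_get_bytes_in_lli(). */
#include <stdio.h>

#define TC_VALUE_MASK 0x0fffu	/* placeholder for COH901318_CX_CTRL_TC_VALUE_MASK */

struct lli {
	unsigned int control;		/* low bits hold the remaining byte count */
	struct lli *virt_link_addr;	/* next item in the chain, NULL at the end */
};

static unsigned int bytes_in_lli(const struct lli *lli)
{
	unsigned int bytes = 0;

	while (lli) {
		bytes += lli->control & TC_VALUE_MASK;
		lli = lli->virt_link_addr;
	}
	return bytes;
}

int main(void)
{
	struct lli c = { 0x100, NULL }, b = { 0x200, &c }, a = { 0x080, &b };

	printf("residue = %u bytes\n", bytes_in_lli(&a));	/* 0x380 */
	return 0;
}

In the driver the first active job additionally reads the hardware transfer counter and follows the link register, since that lli chain is partially consumed.
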
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d18b5d069d7e..9d31d5eb95c1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -515,7 +515,6 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
break;
if (--device->privatecnt == 0)
dma_cap_clear(DMA_PRIVATE, device->cap_mask);
- chan->private = NULL;
chan = NULL;
}
}
@@ -537,7 +536,6 @@ void dma_release_channel(struct dma_chan *chan)
/* drop PRIVATE cap enabled by __dma_request_channel() */
if (--chan->device->privatecnt == 0)
dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
- chan->private = NULL;
mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
@@ -695,11 +693,11 @@ int dma_async_device_register(struct dma_device *device)
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_prep_slave_sg);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
- !device->device_terminate_all);
+ !device->device_control);
BUG_ON(!device->device_alloc_chan_resources);
BUG_ON(!device->device_free_chan_resources);
- BUG_ON(!device->device_is_tx_complete);
+ BUG_ON(!device->device_tx_status);
BUG_ON(!device->device_issue_pending);
BUG_ON(!device->dev);
@@ -978,7 +976,9 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
struct dma_chan *chan)
{
tx->chan = chan;
+ #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
spin_lock_init(&tx->lock);
+ #endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
@@ -1011,7 +1011,7 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
*/
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
- struct dma_async_tx_descriptor *dep = tx->next;
+ struct dma_async_tx_descriptor *dep = txd_next(tx);
struct dma_async_tx_descriptor *dep_next;
struct dma_chan *chan;
@@ -1019,7 +1019,7 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
return;
/* we'll submit tx->next now, so clear the link */
- tx->next = NULL;
+ txd_clear_next(tx);
chan = dep->chan;
/* keep submitting up until a channel switch is detected
@@ -1027,14 +1027,14 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
* processing the interrupt from async_tx_channel_switch
*/
for (; dep; dep = dep_next) {
- spin_lock_bh(&dep->lock);
- dep->parent = NULL;
- dep_next = dep->next;
+ txd_lock(dep);
+ txd_clear_parent(dep);
+ dep_next = txd_next(dep);
if (dep_next && dep_next->chan == chan)
- dep->next = NULL; /* ->next will be submitted */
+ txd_clear_next(dep); /* ->next will be submitted */
else
dep_next = NULL; /* submit current dep and terminate */
- spin_unlock_bh(&dep->lock);
+ txd_unlock(dep);
dep->tx_submit(dep);
}
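
In dma_run_dependencies() above, the direct pokes at tx->next, tx->parent and tx->lock are replaced by txd_next(), txd_clear_next(), txd_clear_parent() and txd_lock()/txd_unlock(), and the spin_lock_init() in dma_async_tx_descriptor_init() is now guarded by CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH. The point of the accessors is that the chaining fields can be compiled out entirely; a condensed model of the pattern (local names, not the dmaengine.h helpers):

/* Sketch: accessor shims that become no-ops when chaining is compiled out. */
#include <stddef.h>
#include <stdio.h>

struct txd {
	int cookie;			/* always present */
#ifndef DISABLE_CHANNEL_SWITCH		/* stand-in for the kernel config symbol */
	struct txd *next;		/* dependency chain */
	struct txd *parent;
#endif
};

#ifndef DISABLE_CHANNEL_SWITCH
static struct txd *txd_next(struct txd *t)             { return t->next; }
static void txd_set_next(struct txd *t, struct txd *n) { t->next = n; }
static void txd_clear_next(struct txd *t)              { t->next = NULL; }
static void txd_clear_parent(struct txd *t)            { t->parent = NULL; }
#else
static struct txd *txd_next(struct txd *t)             { (void)t; return NULL; }
static void txd_set_next(struct txd *t, struct txd *n) { (void)t; (void)n; }
static void txd_clear_next(struct txd *t)              { (void)t; }
static void txd_clear_parent(struct txd *t)            { (void)t; }
#endif

int main(void)
{
	struct txd a = { .cookie = 1 }, b = { .cookie = 2 };

	txd_set_next(&a, &b);		/* chain b after a */
	txd_clear_parent(&b);
	txd_clear_next(&a);		/* a's dependency will be submitted separately */
	printf("next is %s\n", txd_next(&a) ? "set" : "clear");
	return 0;
}
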
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index d28369f7afd2..a3991ab0d67e 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -781,13 +781,18 @@ err_desc_get:
return NULL;
}
-static void dwc_terminate_all(struct dma_chan *chan)
+static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc, *_desc;
LIST_HEAD(list);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
/*
* This is only called when something went wrong elsewhere, so
* we don't really care about the data. Just disable the
@@ -810,12 +815,14 @@ static void dwc_terminate_all(struct dma_chan *chan)
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
dwc_descriptor_complete(dwc, desc);
+
+ return 0;
}
static enum dma_status
-dwc_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+dwc_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
dma_cookie_t last_used;
@@ -835,10 +842,7 @@ dwc_is_tx_complete(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return ret;
}
@@ -1338,9 +1342,9 @@ static int __init dw_probe(struct platform_device *pdev)
dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
- dw->dma.device_terminate_all = dwc_terminate_all;
+ dw->dma.device_control = dwc_control;
- dw->dma.device_is_tx_complete = dwc_is_tx_complete;
+ dw->dma.device_tx_status = dwc_tx_status;
dw->dma.device_issue_pending = dwc_issue_pending;
dma_writel(dw, CFG, DW_CFG_DMA_EN);
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 88f470f0d820..8088b14ba5f7 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -775,13 +775,18 @@ fail:
return NULL;
}
-static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
+static int fsl_dma_device_control(struct dma_chan *dchan,
+ enum dma_ctrl_cmd cmd, unsigned long arg)
{
struct fsldma_chan *chan;
unsigned long flags;
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
if (!dchan)
- return;
+ return -EINVAL;
chan = to_fsl_chan(dchan);
@@ -795,6 +800,8 @@ static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
fsldma_free_desc_list(chan, &chan->ld_running);
spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ return 0;
}
/**
@@ -965,13 +972,12 @@ static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
}
/**
- * fsl_dma_is_complete - Determine the DMA status
+ * fsl_tx_status - Determine the DMA status
* @chan : Freescale DMA channel
*/
-static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
+static enum dma_status fsl_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
dma_cookie_t last_used;
@@ -982,11 +988,7 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
last_used = dchan->cookie;
last_complete = chan->completed_cookie;
- if (done)
- *done = last_complete;
-
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -1313,7 +1315,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
INIT_LIST_HEAD(&fdev->common.channels);
/* ioremap the registers for use */
- fdev->regs = of_iomap(op->node, 0);
+ fdev->regs = of_iomap(op->dev.of_node, 0);
if (!fdev->regs) {
dev_err(&op->dev, "unable to ioremap registers\n");
err = -ENOMEM;
@@ -1321,7 +1323,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
}
/* map the channel IRQ if it exists, but don't hookup the handler yet */
- fdev->irq = irq_of_parse_and_map(op->node, 0);
+ fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
@@ -1330,10 +1332,10 @@ static int __devinit fsldma_of_probe(struct of_device *op,
fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
- fdev->common.device_is_tx_complete = fsl_dma_is_complete;
+ fdev->common.device_tx_status = fsl_tx_status;
fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
- fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
+ fdev->common.device_control = fsl_dma_device_control;
fdev->common.dev = &op->dev;
dev_set_drvdata(&op->dev, fdev);
@@ -1343,7 +1345,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
* of_platform_bus_remove(). Instead, we manually instantiate every DMA
* channel object.
*/
- for_each_child_of_node(op->node, child) {
+ for_each_child_of_node(op->dev.of_node, child) {
if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
fsl_dma_chan_probe(fdev, child,
FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
@@ -1409,10 +1411,13 @@ static const struct of_device_id fsldma_of_ids[] = {
};
static struct of_platform_driver fsldma_of_driver = {
- .name = "fsl-elo-dma",
- .match_table = fsldma_of_ids,
- .probe = fsldma_of_probe,
- .remove = fsldma_of_remove,
+ .driver = {
+ .name = "fsl-elo-dma",
+ .owner = THIS_MODULE,
+ .of_match_table = fsldma_of_ids,
+ },
+ .probe = fsldma_of_probe,
+ .remove = fsldma_of_remove,
};
/*----------------------------------------------------------------------------*/
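
The fsldma registration hunk (like the talitos one earlier) moves .name and .match_table out of the of_platform_driver itself and into its embedded .driver, using .of_match_table, which is the direction the OF platform code was converging on at this point. A shape-only mock of the new layout (the structs below are local stand-ins, not the kernel's):

/* Sketch: local stand-ins showing the new-style of_platform_driver layout. */
#include <stdio.h>

struct of_device_id { const char *compatible; };

struct device_driver {
	const char *name;
	const struct of_device_id *of_match_table;
};

struct of_platform_driver {
	struct device_driver driver;	/* name and match table now live here */
	int (*probe)(void);
	int (*remove)(void);
};

static const struct of_device_id demo_match[] = {
	{ .compatible = "fsl,eloplus-dma" },	/* example value from the patch */
	{ }
};

static int demo_probe(void)  { return 0; }
static int demo_remove(void) { return 0; }

static struct of_platform_driver demo_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.of_match_table = demo_match,
	},
	.probe  = demo_probe,
	.remove = demo_remove,
};

int main(void)
{
	printf("%s matches %s\n", demo_driver.driver.name,
	       demo_driver.driver.of_match_table[0].compatible);
	return 0;
}
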
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 3e5a8005c62b..c9213ead4a26 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -727,18 +727,18 @@ static void ioat1_timer_event(unsigned long data)
}
enum dma_status
-ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct ioat_chan_common *chan = to_chan_common(c);
struct ioatdma_device *device = chan->device;
- if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
+ if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
return DMA_SUCCESS;
device->cleanup_fn((unsigned long) c);
- return ioat_is_complete(c, cookie, done, used);
+ return ioat_tx_status(c, cookie, txstate);
}
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
@@ -858,7 +858,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
if (tmo == 0 ||
- dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+ dma->device_tx_status(dma_chan, cookie, NULL)
!= DMA_SUCCESS) {
dev_err(dev, "Self-test copy timed out, disabling\n");
err = -ENODEV;
@@ -1199,7 +1199,7 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
- dma->device_is_tx_complete = ioat_is_dma_complete;
+ dma->device_tx_status = ioat_dma_tx_status;
err = ioat_probe(device);
if (err)
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 86b97ac8774e..6d3a73b57e54 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -96,6 +96,7 @@ struct ioat_chan_common {
#define IOAT_COMPLETION_ACK 1
#define IOAT_RESET_PENDING 2
#define IOAT_KOBJ_INIT_FAIL 3
+ #define IOAT_RESHAPE_PENDING 4
struct timer_list timer;
#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
#define IDLE_TIMEOUT msecs_to_jiffies(2000)
@@ -142,15 +143,14 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
}
/**
- * ioat_is_complete - poll the status of an ioat transaction
+ * ioat_tx_status - poll the status of an ioat transaction
* @c: channel handle
* @cookie: transaction identifier
- * @done: if set, updated with last completed transaction
- * @used: if set, updated with last used transaction
+ * @txstate: if set, updated with the transaction state
*/
static inline enum dma_status
-ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct ioat_chan_common *chan = to_chan_common(c);
dma_cookie_t last_used;
@@ -159,10 +159,7 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
last_used = c->cookie;
last_complete = chan->completed_cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -338,8 +335,8 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
void ioat_init_channel(struct ioatdma_device *device,
struct ioat_chan_common *chan, int idx);
-enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used);
+enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate);
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index b5ae56c211e6..3c8b32a83794 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -56,8 +56,6 @@ void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
ioat->dmacount += ioat2_ring_pending(ioat);
ioat->issued = ioat->head;
- /* make descriptor updates globally visible before notifying channel */
- wmb();
writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
dev_dbg(to_dev(chan),
"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
@@ -69,9 +67,9 @@ void ioat2_issue_pending(struct dma_chan *c)
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
if (ioat2_ring_pending(ioat)) {
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&ioat->prep_lock);
__ioat2_issue_pending(ioat);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
}
}
@@ -80,7 +78,7 @@ void ioat2_issue_pending(struct dma_chan *c)
* @ioat: ioat2+ channel
*
* Check if the number of unsubmitted descriptors has exceeded the
- * watermark. Called with ring_lock held
+ * watermark. Called with prep_lock held
*/
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
@@ -92,7 +90,6 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
struct ioat_ring_ent *desc;
struct ioat_dma_descriptor *hw;
- int idx;
if (ioat2_ring_space(ioat) < 1) {
dev_err(to_dev(&ioat->base),
@@ -102,8 +99,7 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued);
- idx = ioat2_desc_alloc(ioat, 1);
- desc = ioat2_get_ring_ent(ioat, idx);
+ desc = ioat2_get_ring_ent(ioat, ioat->head);
hw = desc->hw;
hw->ctl = 0;
@@ -117,14 +113,16 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
async_tx_ack(&desc->txd);
ioat2_set_chainaddr(ioat, desc->txd.phys);
dump_desc_dbg(ioat, desc);
+ wmb();
+ ioat->head += 1;
__ioat2_issue_pending(ioat);
}
static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&ioat->prep_lock);
__ioat2_start_null_desc(ioat);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
}
static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
@@ -134,15 +132,16 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
struct ioat_ring_ent *desc;
bool seen_current = false;
u16 active;
- int i;
+ int idx = ioat->tail, i;
dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued);
active = ioat2_ring_active(ioat);
for (i = 0; i < active && !seen_current; i++) {
- prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
- desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
+ smp_read_barrier_depends();
+ prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
+ desc = ioat2_get_ring_ent(ioat, idx + i);
tx = &desc->txd;
dump_desc_dbg(ioat, desc);
if (tx->cookie) {
@@ -158,11 +157,12 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
if (tx->phys == phys_complete)
seen_current = true;
}
- ioat->tail += i;
+ smp_mb(); /* finish all descriptor reads before incrementing tail */
+ ioat->tail = idx + i;
BUG_ON(active && !seen_current); /* no active descs have written a completion? */
chan->last_completion = phys_complete;
- if (ioat->head == ioat->tail) {
+ if (active - i == 0) {
dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
__func__);
clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
@@ -179,24 +179,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
struct ioat_chan_common *chan = &ioat->base;
unsigned long phys_complete;
- prefetch(chan->completion);
-
- if (!spin_trylock_bh(&chan->cleanup_lock))
- return;
-
- if (!ioat_cleanup_preamble(chan, &phys_complete)) {
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- }
-
- if (!spin_trylock_bh(&ioat->ring_lock)) {
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- }
-
- __cleanup(ioat, phys_complete);
-
- spin_unlock_bh(&ioat->ring_lock);
+ spin_lock_bh(&chan->cleanup_lock);
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
spin_unlock_bh(&chan->cleanup_lock);
}
@@ -287,12 +272,10 @@ void ioat2_timer_event(unsigned long data)
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
- spin_lock_bh(&chan->cleanup_lock);
if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
unsigned long phys_complete;
u64 status;
- spin_lock_bh(&ioat->ring_lock);
status = ioat_chansts(chan);
/* when halted due to errors check for channel
@@ -311,26 +294,31 @@ void ioat2_timer_event(unsigned long data)
* acknowledged a pending completion once, then be more
* forceful with a restart
*/
- if (ioat_cleanup_preamble(chan, &phys_complete))
+ spin_lock_bh(&chan->cleanup_lock);
+ if (ioat_cleanup_preamble(chan, &phys_complete)) {
__cleanup(ioat, phys_complete);
- else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
+ } else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+ spin_lock_bh(&ioat->prep_lock);
ioat2_restart_channel(ioat);
- else {
+ spin_unlock_bh(&ioat->prep_lock);
+ } else {
set_bit(IOAT_COMPLETION_ACK, &chan->state);
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
}
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
} else {
u16 active;
/* if the ring is idle, empty, and oversized try to step
* down the size
*/
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&chan->cleanup_lock);
+ spin_lock_bh(&ioat->prep_lock);
active = ioat2_ring_active(ioat);
if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
reshape_ring(ioat, ioat->alloc_order-1);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
/* keep shrinking until we get back to our minimum
* default size
@@ -338,7 +326,6 @@ void ioat2_timer_event(unsigned long data)
if (ioat->alloc_order > ioat_get_alloc_order())
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
- spin_unlock_bh(&chan->cleanup_lock);
}
static int ioat2_reset_hw(struct ioat_chan_common *chan)
@@ -392,7 +379,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
ioat_init_channel(device, &ioat->base, i);
ioat->xfercap_log = xfercap_log;
- spin_lock_init(&ioat->ring_lock);
+ spin_lock_init(&ioat->prep_lock);
if (device->reset_hw(&ioat->base)) {
i = 0;
break;
@@ -418,8 +405,17 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+ /* make descriptor updates visible before advancing ioat->head;
+ * this is purposefully not smp_wmb() since we are also
+ * publishing the descriptor updates to a dma device
+ */
+ wmb();
+
+ ioat->head += ioat->produce;
+
ioat2_update_pending(ioat);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
return cookie;
}
@@ -531,13 +527,15 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
if (!ring)
return -ENOMEM;
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&chan->cleanup_lock);
+ spin_lock_bh(&ioat->prep_lock);
ioat->ring = ring;
ioat->head = 0;
ioat->issued = 0;
ioat->tail = 0;
ioat->alloc_order = order;
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
tasklet_enable(&chan->cleanup_task);
ioat2_start_null_desc(ioat);
@@ -553,7 +551,7 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
*/
struct ioat_chan_common *chan = &ioat->base;
struct dma_chan *c = &chan->common;
- const u16 curr_size = ioat2_ring_mask(ioat) + 1;
+ const u16 curr_size = ioat2_ring_size(ioat);
const u16 active = ioat2_ring_active(ioat);
const u16 new_size = 1 << order;
struct ioat_ring_ent **ring;
@@ -653,54 +651,61 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
}
/**
- * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops
- * @idx: gets starting descriptor index on successful allocation
+ * ioat2_check_space_lock - verify space and grab ring producer lock
* @ioat: ioat2,3 channel (ring) to operate on
* @num_descs: allocation length
*/
-int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
+int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
{
struct ioat_chan_common *chan = &ioat->base;
+ bool retry;
- spin_lock_bh(&ioat->ring_lock);
+ retry:
+ spin_lock_bh(&ioat->prep_lock);
/* never allow the last descriptor to be consumed, we need at
* least one free at all times to allow for on-the-fly ring
* resizing.
*/
- while (unlikely(ioat2_ring_space(ioat) <= num_descs)) {
- if (reshape_ring(ioat, ioat->alloc_order + 1) &&
- ioat2_ring_space(ioat) > num_descs)
- break;
-
- if (printk_ratelimit())
- dev_dbg(to_dev(chan),
- "%s: ring full! num_descs: %d (%x:%x:%x)\n",
- __func__, num_descs, ioat->head, ioat->tail,
- ioat->issued);
- spin_unlock_bh(&ioat->ring_lock);
-
- /* progress reclaim in the allocation failure case we
- * may be called under bh_disabled so we need to trigger
- * the timer event directly
- */
- spin_lock_bh(&chan->cleanup_lock);
- if (jiffies > chan->timer.expires &&
- timer_pending(&chan->timer)) {
- struct ioatdma_device *device = chan->device;
-
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
- spin_unlock_bh(&chan->cleanup_lock);
- device->timer_fn((unsigned long) &chan->common);
- } else
- spin_unlock_bh(&chan->cleanup_lock);
- return -ENOMEM;
+ if (likely(ioat2_ring_space(ioat) > num_descs)) {
+ dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
+ __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
+ ioat->produce = num_descs;
+ return 0; /* with ioat->prep_lock held */
}
+ retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
+ spin_unlock_bh(&ioat->prep_lock);
- dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
- __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
+ /* is another cpu already trying to expand the ring? */
+ if (retry)
+ goto retry;
- *idx = ioat2_desc_alloc(ioat, num_descs);
- return 0; /* with ioat->ring_lock held */
+ spin_lock_bh(&chan->cleanup_lock);
+ spin_lock_bh(&ioat->prep_lock);
+ retry = reshape_ring(ioat, ioat->alloc_order + 1);
+ clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
+
+ /* if we were able to expand the ring retry the allocation */
+ if (retry)
+ goto retry;
+
+ if (printk_ratelimit())
+ dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
+ __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
+
+ /* progress reclaim in the allocation failure case; we may be
+ * called under bh_disabled, so we need to trigger the timer
+ * event directly
+ */
+ if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) {
+ struct ioatdma_device *device = chan->device;
+
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ device->timer_fn((unsigned long) &chan->common);
+ }
+
+ return -ENOMEM;
}
struct dma_async_tx_descriptor *
@@ -713,14 +718,11 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
dma_addr_t dst = dma_dest;
dma_addr_t src = dma_src;
size_t total_len = len;
- int num_descs;
- u16 idx;
- int i;
+ int num_descs, idx, i;
num_descs = ioat2_xferlen_to_descs(ioat, len);
- if (likely(num_descs) &&
- ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
- /* pass */;
+ if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
+ idx = ioat->head;
else
return NULL;
i = 0;
@@ -777,7 +779,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
device->cleanup_fn((unsigned long) c);
device->reset_hw(chan);
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&chan->cleanup_lock);
+ spin_lock_bh(&ioat->prep_lock);
descs = ioat2_ring_space(ioat);
dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
for (i = 0; i < descs; i++) {
@@ -800,7 +803,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
ioat->alloc_order = 0;
pci_pool_free(device->completion_pool, chan->completion,
chan->completion_dma);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
chan->last_completion = 0;
chan->completion_dma = 0;
@@ -855,7 +859,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
dma->device_issue_pending = ioat2_issue_pending;
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- dma->device_is_tx_complete = ioat_is_dma_complete;
+ dma->device_tx_status = ioat_tx_status;
err = ioat_probe(device);
if (err)
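
The dma_v2.c changes split the old ring_lock into a producer-side prep_lock while keeping cleanup_lock for the consumer side; paths that touch both ends of the ring (resource allocation, reshape, free) take cleanup_lock first and prep_lock second. The toy below illustrates that two-lock discipline with pthreads; it is an analogy for the nesting order only, not the kernel's spinlock semantics:

/* Sketch: two-lock ring discipline analogous to prep_lock/cleanup_lock. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cleanup_lock = PTHREAD_MUTEX_INITIALIZER;	/* consumer side */
static pthread_mutex_t prep_lock    = PTHREAD_MUTEX_INITIALIZER;	/* producer side */

static unsigned int head, tail;		/* producer / consumer indices */

static void submit_one(void)
{
	pthread_mutex_lock(&prep_lock);	/* producers only need prep_lock */
	head++;				/* publish after preparing descriptors */
	pthread_mutex_unlock(&prep_lock);
}

static void cleanup_done(void)
{
	pthread_mutex_lock(&cleanup_lock);	/* consumers only need cleanup_lock */
	if (tail != head)
		tail++;
	pthread_mutex_unlock(&cleanup_lock);
}

static void reshape(void)
{
	/* operations that touch both ends take cleanup_lock first,
	 * then prep_lock, matching the nesting order in the patch */
	pthread_mutex_lock(&cleanup_lock);
	pthread_mutex_lock(&prep_lock);
	/* ... resize the ring here ... */
	pthread_mutex_unlock(&prep_lock);
	pthread_mutex_unlock(&cleanup_lock);
}

int main(void)
{
	submit_one();
	cleanup_done();
	reshape();
	printf("head=%u tail=%u\n", head, tail);
	return 0;
}
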
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index ef2871fd7868..a2c413b2b8d8 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -22,6 +22,7 @@
#define IOATDMA_V2_H
#include <linux/dmaengine.h>
+#include <linux/circ_buf.h>
#include "dma.h"
#include "hw.h"
@@ -49,8 +50,9 @@ extern int ioat_ring_alloc_order;
* @tail: cleanup index
* @dmacount: identical to 'head' except for occasionally resetting to zero
* @alloc_order: log2 of the number of allocated descriptors
+ * @produce: number of descriptors to produce at submit time
* @ring: software ring buffer implementation of hardware ring
- * @ring_lock: protects ring attributes
+ * @prep_lock: serializes descriptor preparation (producers)
*/
struct ioat2_dma_chan {
struct ioat_chan_common base;
@@ -60,8 +62,9 @@ struct ioat2_dma_chan {
u16 tail;
u16 dmacount;
u16 alloc_order;
+ u16 produce;
struct ioat_ring_ent **ring;
- spinlock_t ring_lock;
+ spinlock_t prep_lock;
};
static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
@@ -71,38 +74,26 @@ static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
return container_of(chan, struct ioat2_dma_chan, base);
}
-static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat)
+static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat)
{
- return (1 << ioat->alloc_order) - 1;
+ return 1 << ioat->alloc_order;
}
/* count of descriptors in flight with the engine */
static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
{
- return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat);
+ return CIRC_CNT(ioat->head, ioat->tail, ioat2_ring_size(ioat));
}
/* count of descriptors pending submission to hardware */
static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
{
- return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat);
+ return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
}
static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
{
- u16 num_descs = ioat2_ring_mask(ioat) + 1;
- u16 active = ioat2_ring_active(ioat);
-
- BUG_ON(active > num_descs);
-
- return num_descs - active;
-}
-
-/* assumes caller already checked space */
-static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len)
-{
- ioat->head += len;
- return ioat->head - len;
+ return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
}
static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
@@ -151,7 +142,7 @@ struct ioat_ring_ent {
static inline struct ioat_ring_ent *
ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
{
- return ioat->ring[idx & ioat2_ring_mask(ioat)];
+ return ioat->ring[idx & (ioat2_ring_size(ioat) - 1)];
}
static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
@@ -168,7 +159,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs);
+int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
int ioat2_enumerate_channels(struct ioatdma_device *device);
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
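
The inline helpers above drop the hand-rolled mask arithmetic in favour of CIRC_CNT() from the newly included <linux/circ_buf.h>. A minimal user-space sketch of the same ring accounting, assuming a power-of-two ring size as ioat2_ring_size() guarantees; the macro below is written to mirror the kernel's CIRC_CNT:

#include <stdio.h>
#include <stdint.h>

/* Mirrors include/linux/circ_buf.h for a power-of-two size. */
#define CIRC_CNT(head, tail, size)  (((head) - (tail)) & ((size) - 1))

static uint16_t ring_size(unsigned int alloc_order)
{
        return 1u << alloc_order;               /* ioat2_ring_size() */
}

static uint16_t ring_active(uint16_t head, uint16_t tail, unsigned int order)
{
        return CIRC_CNT(head, tail, ring_size(order));  /* descriptors in flight */
}

static uint16_t ring_space(uint16_t head, uint16_t tail, unsigned int order)
{
        return ring_size(order) - ring_active(head, tail, order);
}

int main(void)
{
        /* alloc_order 4 -> 16 entries; producer at 10, consumer at 6 */
        printf("active=%u space=%u\n",
               ring_active(10, 6, 4), ring_space(10, 6, 4));    /* 4 and 12 */
        return 0;
}
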
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 6740e319c9cf..1cdd22e1051b 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -260,8 +260,8 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
struct ioat_chan_common *chan = &ioat->base;
struct ioat_ring_ent *desc;
bool seen_current = false;
+ int idx = ioat->tail, i;
u16 active;
- int i;
dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued);
@@ -270,13 +270,14 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
for (i = 0; i < active && !seen_current; i++) {
struct dma_async_tx_descriptor *tx;
- prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
- desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
+ smp_read_barrier_depends();
+ prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
+ desc = ioat2_get_ring_ent(ioat, idx + i);
dump_desc_dbg(ioat, desc);
tx = &desc->txd;
if (tx->cookie) {
chan->completed_cookie = tx->cookie;
- ioat3_dma_unmap(ioat, desc, ioat->tail + i);
+ ioat3_dma_unmap(ioat, desc, idx + i);
tx->cookie = 0;
if (tx->callback) {
tx->callback(tx->callback_param);
@@ -293,69 +294,30 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
i++;
}
}
- ioat->tail += i;
+ smp_mb(); /* finish all descriptor reads before incrementing tail */
+ ioat->tail = idx + i;
BUG_ON(active && !seen_current); /* no active descs have written a completion? */
chan->last_completion = phys_complete;
- active = ioat2_ring_active(ioat);
- if (active == 0) {
+ if (active - i == 0) {
dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
__func__);
clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
/* 5 microsecond delay per pending descriptor */
- writew(min((5 * active), IOAT_INTRDELAY_MASK),
+ writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}
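
The intrdelay write just above throttles the completion interrupt: five microseconds per descriptor still pending after this cleanup pass, clamped to the register's maximum. A stand-alone sketch of that computation, with an illustrative clamp value rather than the real IOAT_INTRDELAY_MASK:

#include <stdio.h>

#define INTRDELAY_MAX  0x3fff                   /* illustrative clamp, not the hardware value */

static unsigned int intr_delay_us(unsigned int pending)
{
        unsigned int delay = 5 * pending;       /* 5 us per pending descriptor */

        return delay < INTRDELAY_MAX ? delay : INTRDELAY_MAX;
}

int main(void)
{
        printf("%u %u\n", intr_delay_us(3), intr_delay_us(100000));  /* 15, then clamped */
        return 0;
}
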
-/* try to cleanup, but yield (via spin_trylock) to incoming submissions
- * with the expectation that we will immediately poll again shortly
- */
-static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat)
+static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
unsigned long phys_complete;
- prefetch(chan->completion);
-
- if (!spin_trylock_bh(&chan->cleanup_lock))
- return;
-
- if (!ioat_cleanup_preamble(chan, &phys_complete)) {
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- }
-
- if (!spin_trylock_bh(&ioat->ring_lock)) {
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- }
-
- __cleanup(ioat, phys_complete);
-
- spin_unlock_bh(&ioat->ring_lock);
- spin_unlock_bh(&chan->cleanup_lock);
-}
-
-/* run cleanup now because we already delayed the interrupt via INTRDELAY */
-static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat)
-{
- struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
-
- prefetch(chan->completion);
-
spin_lock_bh(&chan->cleanup_lock);
- if (!ioat_cleanup_preamble(chan, &phys_complete)) {
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- }
- spin_lock_bh(&ioat->ring_lock);
-
- __cleanup(ioat, phys_complete);
-
- spin_unlock_bh(&ioat->ring_lock);
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
spin_unlock_bh(&chan->cleanup_lock);
}
@@ -363,7 +325,7 @@ static void ioat3_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
- ioat3_cleanup_sync(ioat);
+ ioat3_cleanup(ioat);
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
@@ -384,12 +346,10 @@ static void ioat3_timer_event(unsigned long data)
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
- spin_lock_bh(&chan->cleanup_lock);
if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
unsigned long phys_complete;
u64 status;
- spin_lock_bh(&ioat->ring_lock);
status = ioat_chansts(chan);
/* when halted due to errors check for channel
@@ -408,26 +368,31 @@ static void ioat3_timer_event(unsigned long data)
* acknowledged a pending completion once, then be more
* forceful with a restart
*/
+ spin_lock_bh(&chan->cleanup_lock);
if (ioat_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
- else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
+ else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+ spin_lock_bh(&ioat->prep_lock);
ioat3_restart_channel(ioat);
- else {
+ spin_unlock_bh(&ioat->prep_lock);
+ } else {
set_bit(IOAT_COMPLETION_ACK, &chan->state);
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
}
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
} else {
u16 active;
/* if the ring is idle, empty, and oversized try to step
* down the size
*/
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&chan->cleanup_lock);
+ spin_lock_bh(&ioat->prep_lock);
active = ioat2_ring_active(ioat);
if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
reshape_ring(ioat, ioat->alloc_order-1);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
/* keep shrinking until we get back to our minimum
* default size
@@ -435,21 +400,20 @@ static void ioat3_timer_event(unsigned long data)
if (ioat->alloc_order > ioat_get_alloc_order())
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
- spin_unlock_bh(&chan->cleanup_lock);
}
static enum dma_status
-ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
+ if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
return DMA_SUCCESS;
- ioat3_cleanup_poll(ioat);
+ ioat3_cleanup(ioat);
- return ioat_is_complete(c, cookie, done, used);
+ return ioat_tx_status(c, cookie, txstate);
}
static struct dma_async_tx_descriptor *
@@ -460,15 +424,12 @@ ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
struct ioat_ring_ent *desc;
size_t total_len = len;
struct ioat_fill_descriptor *fill;
- int num_descs;
u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
- u16 idx;
- int i;
+ int num_descs, idx, i;
num_descs = ioat2_xferlen_to_descs(ioat, len);
- if (likely(num_descs) &&
- ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
- /* pass */;
+ if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
+ idx = ioat->head;
else
return NULL;
i = 0;
@@ -513,11 +474,8 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
struct ioat_xor_descriptor *xor;
struct ioat_xor_ext_descriptor *xor_ex = NULL;
struct ioat_dma_descriptor *hw;
+ int num_descs, with_ext, idx, i;
u32 offset = 0;
- int num_descs;
- int with_ext;
- int i;
- u16 idx;
u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
BUG_ON(src_cnt < 2);
@@ -537,9 +495,8 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
* (legacy) descriptor to ensure all completion writes arrive in
* order.
*/
- if (likely(num_descs) &&
- ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0)
- /* pass */;
+ if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
+ idx = ioat->head;
else
return NULL;
i = 0;
@@ -657,11 +614,8 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
struct ioat_pq_ext_descriptor *pq_ex = NULL;
struct ioat_dma_descriptor *hw;
u32 offset = 0;
- int num_descs;
- int with_ext;
- int i, s;
- u16 idx;
u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
+ int i, s, idx, with_ext, num_descs;
dev_dbg(to_dev(chan), "%s\n", __func__);
/* the engine requires at least two sources (we provide
@@ -687,8 +641,8 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
* order.
*/
if (likely(num_descs) &&
- ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0)
- /* pass */;
+ ioat2_check_space_lock(ioat, num_descs+1) == 0)
+ idx = ioat->head;
else
return NULL;
i = 0;
@@ -851,10 +805,9 @@ ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_ring_ent *desc;
struct ioat_dma_descriptor *hw;
- u16 idx;
- if (ioat2_alloc_and_lock(&idx, ioat, 1) == 0)
- desc = ioat2_get_ring_ent(ioat, idx);
+ if (ioat2_check_space_lock(ioat, 1) == 0)
+ desc = ioat2_get_ring_ent(ioat, ioat->head);
else
return NULL;
@@ -977,7 +930,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test xor timed out\n");
err = -ENODEV;
goto free_resources;
@@ -1031,7 +984,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test validate timed out\n");
err = -ENODEV;
goto free_resources;
@@ -1072,7 +1025,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test memset timed out\n");
err = -ENODEV;
goto free_resources;
@@ -1115,7 +1068,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test 2nd validate timed out\n");
err = -ENODEV;
goto free_resources;
@@ -1222,7 +1175,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (cap & IOAT_CAP_XOR) {
is_raid_device = true;
dma->max_xor = 8;
- dma->xor_align = 2;
+ dma->xor_align = 6;
dma_cap_set(DMA_XOR, dma->cap_mask);
dma->device_prep_dma_xor = ioat3_prep_xor;
@@ -1233,7 +1186,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (cap & IOAT_CAP_PQ) {
is_raid_device = true;
dma_set_maxpq(dma, 8, 0);
- dma->pq_align = 2;
+ dma->pq_align = 6;
dma_cap_set(DMA_PQ, dma->cap_mask);
dma->device_prep_dma_pq = ioat3_prep_pq;
@@ -1243,7 +1196,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (!(cap & IOAT_CAP_XOR)) {
dma->max_xor = 8;
- dma->xor_align = 2;
+ dma->xor_align = 6;
dma_cap_set(DMA_XOR, dma->cap_mask);
dma->device_prep_dma_xor = ioat3_prep_pqxor;
@@ -1259,11 +1212,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (is_raid_device) {
- dma->device_is_tx_complete = ioat3_is_complete;
+ dma->device_tx_status = ioat3_tx_status;
device->cleanup_fn = ioat3_cleanup_event;
device->timer_fn = ioat3_timer_event;
} else {
- dma->device_is_tx_complete = ioat_is_dma_complete;
+ dma->device_tx_status = ioat_dma_tx_status;
device->cleanup_fn = ioat2_cleanup_event;
device->timer_fn = ioat2_timer_event;
}
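
The ring_lock split visible in the hunks above follows one discipline throughout ioat2/3: submission paths take only prep_lock, the cleanup path takes only cleanup_lock, and operations that must quiesce both ends (freeing the channel, reshaping the ring, restarting after an error) nest cleanup_lock outside prep_lock in that fixed order. A small user-space model of that discipline, with pthread mutexes standing in for the spinlocks and hypothetical function names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cleanup_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t prep_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int head, tail;                 /* producer / consumer indexes */

static void prep_descriptor(void)               /* producer side only */
{
        pthread_mutex_lock(&prep_lock);
        head++;                                 /* claim one ring slot */
        pthread_mutex_unlock(&prep_lock);
}

static void cleanup_completed(void)             /* consumer side only */
{
        pthread_mutex_lock(&cleanup_lock);
        tail = head;                            /* retire everything completed */
        pthread_mutex_unlock(&cleanup_lock);
}

static void reshape_or_free(void)               /* needs both ends quiesced */
{
        pthread_mutex_lock(&cleanup_lock);      /* always this order, */
        pthread_mutex_lock(&prep_lock);         /* never the reverse */
        printf("active=%u\n", head - tail);
        pthread_mutex_unlock(&prep_lock);
        pthread_mutex_unlock(&cleanup_lock);
}

int main(void)
{
        prep_descriptor();
        cleanup_completed();
        reshape_or_free();
        return 0;
}
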
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 99ec26725bae..fab37d1cf48d 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -138,15 +138,10 @@ static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_devic
if (err)
return err;
- device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
- if (!device)
- return -ENOMEM;
-
- pci_set_master(pdev);
-
device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
if (!device)
return -ENOMEM;
+ pci_set_master(pdev);
pci_set_drvdata(pdev, device);
device->version = readb(device->reg_base + IOAT_VER_OFFSET);
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 1ebc801678b0..161c452923b8 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -894,14 +894,14 @@ static void iop_adma_free_chan_resources(struct dma_chan *chan)
}
/**
- * iop_adma_is_complete - poll the status of an ADMA transaction
+ * iop_adma_status - poll the status of an ADMA transaction
* @chan: ADMA channel handle
* @cookie: ADMA transaction identifier
+ * @txstate: a holder for the current state of the channel or NULL
*/
-static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
+static enum dma_status iop_adma_status(struct dma_chan *chan,
dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
dma_cookie_t last_used;
@@ -910,12 +910,7 @@ static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = iop_chan->completed_cookie;
-
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
-
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret == DMA_SUCCESS)
return ret;
@@ -924,11 +919,7 @@ static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = iop_chan->completed_cookie;
-
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -1043,7 +1034,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(1);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
@@ -1143,7 +1134,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
@@ -1190,7 +1181,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test zero sum timed out, disabling\n");
err = -ENODEV;
@@ -1214,7 +1205,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test memset timed out, disabling\n");
err = -ENODEV;
@@ -1246,7 +1237,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test non-zero sum timed out, disabling\n");
err = -ENODEV;
@@ -1341,7 +1332,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_err(dev, "Self-test pq timed out, disabling\n");
err = -ENODEV;
@@ -1378,7 +1369,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
err = -ENODEV;
@@ -1410,7 +1401,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
err = -ENODEV;
@@ -1508,7 +1499,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
/* set base routines */
dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
- dma_dev->device_is_tx_complete = iop_adma_is_complete;
+ dma_dev->device_tx_status = iop_adma_status;
dma_dev->device_issue_pending = iop_adma_issue_pending;
dma_dev->dev = &pdev->dev;
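
Every driver in this series swaps device_is_tx_complete(chan, cookie, &done, &used) for device_tx_status(chan, cookie, txstate), and the pair of NULL-checked writes collapses into a single dma_set_tx_state() call. A stand-alone sketch of what that helper does, assuming it matches the version this series introduces in dmaengine.h:

#include <stdio.h>

typedef int dma_cookie_t;

struct dma_tx_state {
        dma_cookie_t last;      /* last completed cookie */
        dma_cookie_t used;      /* last issued cookie */
        unsigned int residue;   /* bytes left in the current transfer */
};

static void dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last,
                             dma_cookie_t used, unsigned int residue)
{
        if (!st)
                return;         /* callers may pass NULL, as the self-tests do */
        st->last = last;
        st->used = used;
        st->residue = residue;
}

int main(void)
{
        struct dma_tx_state st;

        dma_set_tx_state(&st, 41, 42, 0);
        dma_set_tx_state(NULL, 41, 42, 0);      /* no-op, like the old if (done)/if (used) checks */
        printf("last=%d used=%d\n", st.last, st.used);
        return 0;
}
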
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 2a446397c884..cb26ee9773d6 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1472,13 +1472,18 @@ static void idmac_issue_pending(struct dma_chan *chan)
*/
}
-static void __idmac_terminate_all(struct dma_chan *chan)
+static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac *idmac = to_idmac(chan->device);
unsigned long flags;
int i;
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
ipu_disable_channel(idmac, ichan,
ichan->status >= IPU_CHANNEL_ENABLED);
@@ -1505,17 +1510,23 @@ static void __idmac_terminate_all(struct dma_chan *chan)
tasklet_enable(&to_ipu(idmac)->tasklet);
ichan->status = IPU_CHANNEL_INITIALIZED;
+
+ return 0;
}
-static void idmac_terminate_all(struct dma_chan *chan)
+static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
+ int ret;
mutex_lock(&ichan->chan_mutex);
- __idmac_terminate_all(chan);
+ ret = __idmac_control(chan, cmd, arg);
mutex_unlock(&ichan->chan_mutex);
+
+ return ret;
}
#ifdef DEBUG
@@ -1607,7 +1618,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
mutex_lock(&ichan->chan_mutex);
- __idmac_terminate_all(chan);
+ __idmac_control(chan, DMA_TERMINATE_ALL, 0);
if (ichan->status > IPU_CHANNEL_FREE) {
#ifdef DEBUG
@@ -1637,15 +1648,12 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
tasklet_schedule(&to_ipu(idmac)->tasklet);
}
-static enum dma_status idmac_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
+static enum dma_status idmac_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
- if (done)
- *done = ichan->completed;
- if (used)
- *used = chan->cookie;
+ dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0);
if (cookie != chan->cookie)
return DMA_ERROR;
return DMA_SUCCESS;
@@ -1664,12 +1672,12 @@ static int __init ipu_idmac_init(struct ipu *ipu)
dma->dev = ipu->dev;
dma->device_alloc_chan_resources = idmac_alloc_chan_resources;
dma->device_free_chan_resources = idmac_free_chan_resources;
- dma->device_is_tx_complete = idmac_is_tx_complete;
+ dma->device_tx_status = idmac_tx_status;
dma->device_issue_pending = idmac_issue_pending;
/* Compulsory for DMA_SLAVE fields */
dma->device_prep_slave_sg = idmac_prep_slave_sg;
- dma->device_terminate_all = idmac_terminate_all;
+ dma->device_control = idmac_control;
INIT_LIST_HEAD(&dma->channels);
for (i = 0; i < IPU_CHANNELS_NUM; i++) {
@@ -1703,7 +1711,7 @@ static void __exit ipu_idmac_exit(struct ipu *ipu)
for (i = 0; i < IPU_CHANNELS_NUM; i++) {
struct idmac_channel *ichan = ipu->channel + i;
- idmac_terminate_all(&ichan->dma_chan);
+ idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0);
idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0);
}
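
The idmac conversion is the template for the new device_control hook used by several drivers below: a single entry point multiplexes control commands, returns 0 or a negative errno, and a driver that only implements termination rejects everything else with -ENXIO. A user-space sketch of that contract; the command names here are placeholders rather than the dmaengine enum:

#include <errno.h>
#include <stdio.h>

enum ctrl_cmd { CMD_TERMINATE_ALL, CMD_PAUSE, CMD_RESUME };

static int demo_control(enum ctrl_cmd cmd, unsigned long arg)
{
        (void)arg;                              /* unused by termination */

        if (cmd != CMD_TERMINATE_ALL)
                return -ENXIO;                  /* unsupported command */

        /* a real driver would halt the hardware and mark all descriptors done here */
        return 0;
}

int main(void)
{
        printf("terminate=%d pause=%d\n",
               demo_control(CMD_TERMINATE_ALL, 0), demo_control(CMD_PAUSE, 0));
        return 0;
}
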
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index bbbd58566625..14a8c0f1698e 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -541,8 +541,8 @@ static void mpc_dma_issue_pending(struct dma_chan *chan)
/* Check request completion status */
static enum dma_status
-mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
unsigned long flags;
@@ -554,12 +554,7 @@ mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
last_complete = mchan->completed_cookie;
spin_unlock_irqrestore(&mchan->lock, flags);
- if (done)
- *done = last_complete;
-
- if (used)
- *used = last_used;
-
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -635,7 +630,7 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
static int __devinit mpc_dma_probe(struct of_device *op,
const struct of_device_id *match)
{
- struct device_node *dn = op->node;
+ struct device_node *dn = op->dev.of_node;
struct device *dev = &op->dev;
struct dma_device *dma;
struct mpc_dma *mdma;
@@ -663,7 +658,7 @@ static int __devinit mpc_dma_probe(struct of_device *op,
}
regs_start = res.start;
- regs_size = res.end - res.start + 1;
+ regs_size = resource_size(&res);
if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
dev_err(dev, "Error requesting memory region!\n");
@@ -694,7 +689,7 @@ static int __devinit mpc_dma_probe(struct of_device *op,
dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
dma->device_free_chan_resources = mpc_dma_free_chan_resources;
dma->device_issue_pending = mpc_dma_issue_pending;
- dma->device_is_tx_complete = mpc_dma_is_tx_complete;
+ dma->device_tx_status = mpc_dma_tx_status;
dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
INIT_LIST_HEAD(&dma->channels);
@@ -776,12 +771,12 @@ static struct of_device_id mpc_dma_match[] = {
};
static struct of_platform_driver mpc_dma_driver = {
- .match_table = mpc_dma_match,
.probe = mpc_dma_probe,
.remove = __devexit_p(mpc_dma_remove),
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = mpc_dma_match,
},
};
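
Besides moving the match table into .driver.of_match_table, the mpc512x hunks replace the open-coded res.end - res.start + 1 with resource_size(), which returns the length of an inclusive [start, end] register window. A minimal model of that helper:

#include <stdio.h>
#include <stdint.h>

struct resource {
        uint64_t start;
        uint64_t end;           /* inclusive */
};

static uint64_t resource_size(const struct resource *res)
{
        return res->end - res->start + 1;
}

int main(void)
{
        struct resource res = { .start = 0x1000, .end = 0x1fff };

        printf("size=%llu\n", (unsigned long long)resource_size(&res));  /* 4096 */
        return 0;
}
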
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index e2fd34da64f2..86c5ae9fde34 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -810,14 +810,14 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
}
/**
- * mv_xor_is_complete - poll the status of an XOR transaction
+ * mv_xor_status - poll the status of an XOR transaction
* @chan: XOR channel handle
* @cookie: XOR transaction identifier
+ * @txstate: XOR transactions state holder (or NULL)
*/
-static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
+static enum dma_status mv_xor_status(struct dma_chan *chan,
dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
dma_cookie_t last_used;
@@ -827,10 +827,7 @@ static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = mv_chan->completed_cookie;
mv_chan->is_complete_cookie = cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret == DMA_SUCCESS) {
@@ -842,11 +839,7 @@ static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = mv_chan->completed_cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
-
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -975,7 +968,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
async_tx_ack(tx);
msleep(1);
- if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
@@ -1073,7 +1066,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
async_tx_ack(tx);
msleep(8);
- if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
@@ -1168,7 +1161,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
/* set base routines */
dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
- dma_dev->device_is_tx_complete = mv_xor_is_complete;
+ dma_dev->device_tx_status = mv_xor_status;
dma_dev->device_issue_pending = mv_xor_issue_pending;
dma_dev->dev = &pdev->dev;
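
mv_xor_status(), like the other converted status callbacks, still leaves the actual completion decision to dma_async_is_complete(), which only compares the requested cookie against the last-completed/last-used window. A user-space sketch of that check, assuming it mirrors the dmaengine.h helper of this period:

#include <stdio.h>

typedef int dma_cookie_t;
enum dma_status { DMA_SUCCESS, DMA_IN_PROGRESS };

static enum dma_status is_complete(dma_cookie_t cookie,
                                   dma_cookie_t last_complete,
                                   dma_cookie_t last_used)
{
        if (last_complete <= last_used) {
                if ((cookie <= last_complete) || (cookie > last_used))
                        return DMA_SUCCESS;
        } else {
                /* the cookie counter has wrapped past the completed mark */
                if ((cookie <= last_complete) && (cookie > last_used))
                        return DMA_SUCCESS;
        }
        return DMA_IN_PROGRESS;
}

int main(void)
{
        /* completed up to 7, issued up to 10: cookie 5 is done, cookie 9 is pending */
        printf("%d %d\n", is_complete(5, 7, 10), is_complete(9, 7, 10));
        return 0;
}
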
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
new file mode 100644
index 000000000000..7c50f6dfd3f4
--- /dev/null
+++ b/drivers/dma/pl330.c
@@ -0,0 +1,866 @@
+/* linux/drivers/dma/pl330.c
+ *
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl330.h>
+
+#define NR_DEFAULT_DESC 16
+
+enum desc_status {
+ /* In the DMAC pool */
+ FREE,
+ /*
+ * Allocated to some channel during prep_xxx
+ * Also may be sitting on the work_list.
+ */
+ PREP,
+ /*
+ * Sitting on the work_list and already submitted
+ * to the PL330 core. Not more than two descriptors
+ * of a channel can be BUSY at any time.
+ */
+ BUSY,
+ /*
+ * Sitting on the channel work_list but xfer done
+ * by PL330 core
+ */
+ DONE,
+};
+
+struct dma_pl330_chan {
+ /* Schedule desc completion */
+ struct tasklet_struct task;
+
+ /* DMA-Engine Channel */
+ struct dma_chan chan;
+
+ /* Last completed cookie */
+ dma_cookie_t completed;
+
+ /* List of to be xfered descriptors */
+ struct list_head work_list;
+
+ /* Pointer to the DMAC that manages this channel,
+ * NULL if the channel is available to be acquired.
+ * As the parent, this DMAC also provides descriptors
+ * to the channel.
+ */
+ struct dma_pl330_dmac *dmac;
+
+ /* To protect channel manipulation */
+ spinlock_t lock;
+
+ /* Token of a hardware channel thread of PL330 DMAC
+ * NULL if the channel is available to be acquired.
+ */
+ void *pl330_chid;
+};
+
+struct dma_pl330_dmac {
+ struct pl330_info pif;
+
+ /* DMA-Engine Device */
+ struct dma_device ddma;
+
+ /* Pool of descriptors available for the DMAC's channels */
+ struct list_head desc_pool;
+ /* To protect desc_pool manipulation */
+ spinlock_t pool_lock;
+
+ /* Peripheral channels connected to this DMAC */
+ struct dma_pl330_chan peripherals[0]; /* keep at end */
+};
+
+struct dma_pl330_desc {
+ /* To attach to a queue as child */
+ struct list_head node;
+
+ /* Descriptor for the DMA Engine API */
+ struct dma_async_tx_descriptor txd;
+
+ /* Xfer for PL330 core */
+ struct pl330_xfer px;
+
+ struct pl330_reqcfg rqcfg;
+ struct pl330_req req;
+
+ enum desc_status status;
+
+ /* The channel which currently holds this desc */
+ struct dma_pl330_chan *pchan;
+};
+
+static inline struct dma_pl330_chan *
+to_pchan(struct dma_chan *ch)
+{
+ if (!ch)
+ return NULL;
+
+ return container_of(ch, struct dma_pl330_chan, chan);
+}
+
+static inline struct dma_pl330_desc *
+to_desc(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct dma_pl330_desc, txd);
+}
+
+static inline void free_desc_list(struct list_head *list)
+{
+ struct dma_pl330_dmac *pdmac;
+ struct dma_pl330_desc *desc;
+ struct dma_pl330_chan *pch;
+ unsigned long flags;
+
+ if (list_empty(list))
+ return;
+
+ /* Finish off the work list */
+ list_for_each_entry(desc, list, node) {
+ dma_async_tx_callback callback;
+ void *param;
+
+ /* All desc in a list belong to same channel */
+ pch = desc->pchan;
+ callback = desc->txd.callback;
+ param = desc->txd.callback_param;
+
+ if (callback)
+ callback(param);
+
+ desc->pchan = NULL;
+ }
+
+ pdmac = pch->dmac;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+ list_splice_tail_init(list, &pdmac->desc_pool);
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+}
+
+static inline void fill_queue(struct dma_pl330_chan *pch)
+{
+ struct dma_pl330_desc *desc;
+ int ret;
+
+ list_for_each_entry(desc, &pch->work_list, node) {
+
+ /* If already submitted */
+ if (desc->status == BUSY)
+ break;
+
+ ret = pl330_submit_req(pch->pl330_chid,
+ &desc->req);
+ if (!ret) {
+ desc->status = BUSY;
+ break;
+ } else if (ret == -EAGAIN) {
+ /* QFull or DMAC Dying */
+ break;
+ } else {
+ /* Unacceptable request */
+ desc->status = DONE;
+ dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
+ __func__, __LINE__, desc->txd.cookie);
+ tasklet_schedule(&pch->task);
+ }
+ }
+}
+
+static void pl330_tasklet(unsigned long data)
+{
+ struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
+ struct dma_pl330_desc *desc, *_dt;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ /* Pick up ripe tomatoes */
+ list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
+ if (desc->status == DONE) {
+ pch->completed = desc->txd.cookie;
+ list_move_tail(&desc->node, &list);
+ }
+
+ /* Try to submit a req imm. next to the last completed cookie */
+ fill_queue(pch);
+
+ /* Make sure the PL330 Channel thread is active */
+ pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ free_desc_list(&list);
+}
+
+static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
+{
+ struct dma_pl330_desc *desc = token;
+ struct dma_pl330_chan *pch = desc->pchan;
+ unsigned long flags;
+
+ /* If desc aborted */
+ if (!pch)
+ return;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ desc->status = DONE;
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ tasklet_schedule(&pch->task);
+}
+
+static int pl330_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ struct dma_pl330_dmac *pdmac = pch->dmac;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ pch->completed = chan->cookie = 1;
+
+ pch->pl330_chid = pl330_request_channel(&pdmac->pif);
+ if (!pch->pl330_chid) {
+ spin_unlock_irqrestore(&pch->lock, flags);
+ return 0;
+ }
+
+ tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ return 1;
+}
+
+static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
+{
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ struct dma_pl330_desc *desc;
+ unsigned long flags;
+
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ /* FLUSH the PL330 Channel thread */
+ pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
+
+ /* Mark all desc done */
+ list_for_each_entry(desc, &pch->work_list, node)
+ desc->status = DONE;
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ pl330_tasklet((unsigned long) pch);
+
+ return 0;
+}
+
+static void pl330_free_chan_resources(struct dma_chan *chan)
+{
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ tasklet_kill(&pch->task);
+
+ pl330_release_channel(pch->pl330_chid);
+ pch->pl330_chid = NULL;
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+}
+
+static enum dma_status
+pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ dma_cookie_t last_done, last_used;
+ int ret;
+
+ last_done = pch->completed;
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, last_done, last_used);
+
+ dma_set_tx_state(txstate, last_done, last_used, 0);
+
+ return ret;
+}
+
+static void pl330_issue_pending(struct dma_chan *chan)
+{
+ pl330_tasklet((unsigned long) to_pchan(chan));
+}
+
+/*
+ * prep_xxx returns the last descriptor of the circular list, so the
+ * argument to submit corresponds to the last descriptor of the list.
+ */
+static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_pl330_desc *desc, *last = to_desc(tx);
+ struct dma_pl330_chan *pch = to_pchan(tx->chan);
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ /* Assign cookies to all nodes */
+ cookie = tx->chan->cookie;
+
+ while (!list_empty(&last->node)) {
+ desc = list_entry(last->node.next, struct dma_pl330_desc, node);
+
+ if (++cookie < 0)
+ cookie = 1;
+ desc->txd.cookie = cookie;
+
+ list_move_tail(&desc->node, &pch->work_list);
+ }
+
+ if (++cookie < 0)
+ cookie = 1;
+ last->txd.cookie = cookie;
+
+ list_add_tail(&last->node, &pch->work_list);
+
+ tx->chan->cookie = cookie;
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ return cookie;
+}
+
+static inline void _init_desc(struct dma_pl330_desc *desc)
+{
+ desc->pchan = NULL;
+ desc->req.x = &desc->px;
+ desc->req.token = desc;
+ desc->rqcfg.swap = SWAP_NO;
+ desc->rqcfg.privileged = 0;
+ desc->rqcfg.insnaccess = 0;
+ desc->rqcfg.scctl = SCCTRL0;
+ desc->rqcfg.dcctl = DCCTRL0;
+ desc->req.cfg = &desc->rqcfg;
+ desc->req.xfer_cb = dma_pl330_rqcb;
+ desc->txd.tx_submit = pl330_tx_submit;
+
+ INIT_LIST_HEAD(&desc->node);
+}
+
+/* Returns the number of descriptors added to the DMAC pool */
+int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
+{
+ struct dma_pl330_desc *desc;
+ unsigned long flags;
+ int i;
+
+ if (!pdmac)
+ return 0;
+
+ desc = kmalloc(count * sizeof(*desc), flg);
+ if (!desc)
+ return 0;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+ for (i = 0; i < count; i++) {
+ _init_desc(&desc[i]);
+ list_add_tail(&desc[i].node, &pdmac->desc_pool);
+ }
+
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+ return count;
+}
+
+static struct dma_pl330_desc *
+pluck_desc(struct dma_pl330_dmac *pdmac)
+{
+ struct dma_pl330_desc *desc = NULL;
+ unsigned long flags;
+
+ if (!pdmac)
+ return NULL;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+ if (!list_empty(&pdmac->desc_pool)) {
+ desc = list_entry(pdmac->desc_pool.next,
+ struct dma_pl330_desc, node);
+
+ list_del_init(&desc->node);
+
+ desc->status = PREP;
+ desc->txd.callback = NULL;
+ }
+
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+ return desc;
+}
+
+static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
+{
+ struct dma_pl330_dmac *pdmac = pch->dmac;
+ struct dma_pl330_peri *peri = pch->chan.private;
+ struct dma_pl330_desc *desc;
+
+ /* Pluck one desc from the pool of DMAC */
+ desc = pluck_desc(pdmac);
+
+ /* If the DMAC pool is empty, alloc new */
+ if (!desc) {
+ if (!add_desc(pdmac, GFP_ATOMIC, 1))
+ return NULL;
+
+ /* Try again */
+ desc = pluck_desc(pdmac);
+ if (!desc) {
+ dev_err(pch->dmac->pif.dev,
+ "%s:%d ALERT!\n", __func__, __LINE__);
+ return NULL;
+ }
+ }
+
+ /* Initialize the descriptor */
+ desc->pchan = pch;
+ desc->txd.cookie = 0;
+ async_tx_ack(&desc->txd);
+
+ desc->req.rqtype = peri->rqtype;
+ desc->req.peri = peri->peri_id;
+
+ dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
+
+ return desc;
+}
+
+static inline void fill_px(struct pl330_xfer *px,
+ dma_addr_t dst, dma_addr_t src, size_t len)
+{
+ px->next = NULL;
+ px->bytes = len;
+ px->dst_addr = dst;
+ px->src_addr = src;
+}
+
+static struct dma_pl330_desc *
+__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
+ dma_addr_t src, size_t len)
+{
+ struct dma_pl330_desc *desc = pl330_get_desc(pch);
+
+ if (!desc) {
+ dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+ __func__, __LINE__);
+ return NULL;
+ }
+
+ /*
+ * Ideally we should look out for reqs bigger than
+ * those that can be programmed with 256 bytes of
+ * MC buffer, but considering a req size is seldom
+ * going to be word-unaligned and more than 200MB,
+ * we take it easy.
+ * Also, should the limit be reached we'd rather
+ * have the platform increase the MC buffer size than
+ * complicate this API driver.
+ */
+ fill_px(&desc->px, dst, src, len);
+
+ return desc;
+}
+
+/* Call after fixing burst size */
+static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
+{
+ struct dma_pl330_chan *pch = desc->pchan;
+ struct pl330_info *pi = &pch->dmac->pif;
+ int burst_len;
+
+ burst_len = pi->pcfg.data_bus_width / 8;
+ burst_len *= pi->pcfg.data_buf_dep;
+ burst_len >>= desc->rqcfg.brst_size;
+
+ /* src/dst_burst_len can't be more than 16 */
+ if (burst_len > 16)
+ burst_len = 16;
+
+ while (burst_len > 1) {
+ if (!(len % (burst_len << desc->rqcfg.brst_size)))
+ break;
+ burst_len--;
+ }
+
+ return burst_len;
+}
+
+static struct dma_async_tx_descriptor *
+pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct dma_pl330_desc *desc;
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ struct dma_pl330_peri *peri = chan->private;
+ struct pl330_info *pi;
+ int burst;
+
+ if (unlikely(!pch || !len || !peri))
+ return NULL;
+
+ if (peri->rqtype != MEMTOMEM)
+ return NULL;
+
+ pi = &pch->dmac->pif;
+
+ desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
+ if (!desc)
+ return NULL;
+
+ desc->rqcfg.src_inc = 1;
+ desc->rqcfg.dst_inc = 1;
+
+ /* Select max possible burst size */
+ burst = pi->pcfg.data_bus_width / 8;
+
+ while (burst > 1) {
+ if (!(len % burst))
+ break;
+ burst /= 2;
+ }
+
+ desc->rqcfg.brst_size = 0;
+ while (burst != (1 << desc->rqcfg.brst_size))
+ desc->rqcfg.brst_size++;
+
+ desc->rqcfg.brst_len = get_burst_len(desc, len);
+
+ desc->txd.flags = flags;
+
+ return &desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
+pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flg)
+{
+ struct dma_pl330_desc *first, *desc = NULL;
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ struct dma_pl330_peri *peri = chan->private;
+ struct scatterlist *sg;
+ unsigned long flags;
+ int i, burst_size;
+ dma_addr_t addr;
+
+ if (unlikely(!pch || !sgl || !sg_len))
+ return NULL;
+
+ /* Make sure the direction is consistent */
+ if ((direction == DMA_TO_DEVICE &&
+ peri->rqtype != MEMTODEV) ||
+ (direction == DMA_FROM_DEVICE &&
+ peri->rqtype != DEVTOMEM)) {
+ dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
+ __func__, __LINE__);
+ return NULL;
+ }
+
+ addr = peri->fifo_addr;
+ burst_size = peri->burst_sz;
+
+ first = NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+
+ desc = pl330_get_desc(pch);
+ if (!desc) {
+ struct dma_pl330_dmac *pdmac = pch->dmac;
+
+ dev_err(pch->dmac->pif.dev,
+ "%s:%d Unable to fetch desc\n",
+ __func__, __LINE__);
+ if (!first)
+ return NULL;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+ while (!list_empty(&first->node)) {
+ desc = list_entry(first->node.next,
+ struct dma_pl330_desc, node);
+ list_move_tail(&desc->node, &pdmac->desc_pool);
+ }
+
+ list_move_tail(&first->node, &pdmac->desc_pool);
+
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+ return NULL;
+ }
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->node, &first->node);
+
+ if (direction == DMA_TO_DEVICE) {
+ desc->rqcfg.src_inc = 1;
+ desc->rqcfg.dst_inc = 0;
+ fill_px(&desc->px,
+ addr, sg_dma_address(sg), sg_dma_len(sg));
+ } else {
+ desc->rqcfg.src_inc = 0;
+ desc->rqcfg.dst_inc = 1;
+ fill_px(&desc->px,
+ sg_dma_address(sg), addr, sg_dma_len(sg));
+ }
+
+ desc->rqcfg.brst_size = burst_size;
+ desc->rqcfg.brst_len = 1;
+ }
+
+ /* Return the last desc in the chain */
+ desc->txd.flags = flg;
+ return &desc->txd;
+}
+
+static irqreturn_t pl330_irq_handler(int irq, void *data)
+{
+ if (pl330_update(data))
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+static int __devinit
+pl330_probe(struct amba_device *adev, struct amba_id *id)
+{
+ struct dma_pl330_platdata *pdat;
+ struct dma_pl330_dmac *pdmac;
+ struct dma_pl330_chan *pch;
+ struct pl330_info *pi;
+ struct dma_device *pd;
+ struct resource *res;
+ int i, ret, irq;
+
+ pdat = adev->dev.platform_data;
+
+ if (!pdat || !pdat->nr_valid_peri) {
+ dev_err(&adev->dev, "platform data missing\n");
+ return -ENODEV;
+ }
+
+ /* Allocate a new DMAC and its Channels */
+ pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch)
+ + sizeof(*pdmac), GFP_KERNEL);
+ if (!pdmac) {
+ dev_err(&adev->dev, "unable to allocate mem\n");
+ return -ENOMEM;
+ }
+
+ pi = &pdmac->pif;
+ pi->dev = &adev->dev;
+ pi->pl330_data = NULL;
+ pi->mcbufsz = pdat->mcbuf_sz;
+
+ res = &adev->res;
+ request_mem_region(res->start, resource_size(res), "dma-pl330");
+
+ pi->base = ioremap(res->start, resource_size(res));
+ if (!pi->base) {
+ ret = -ENXIO;
+ goto probe_err1;
+ }
+
+ irq = adev->irq[0];
+ ret = request_irq(irq, pl330_irq_handler, 0,
+ dev_name(&adev->dev), pi);
+ if (ret)
+ goto probe_err2;
+
+ ret = pl330_add(pi);
+ if (ret)
+ goto probe_err3;
+
+ INIT_LIST_HEAD(&pdmac->desc_pool);
+ spin_lock_init(&pdmac->pool_lock);
+
+ /* Create a descriptor pool of default size */
+ if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
+ dev_warn(&adev->dev, "unable to allocate desc\n");
+
+ pd = &pdmac->ddma;
+ INIT_LIST_HEAD(&pd->channels);
+
+ /* Initialize channel parameters */
+ for (i = 0; i < pdat->nr_valid_peri; i++) {
+ struct dma_pl330_peri *peri = &pdat->peri[i];
+ pch = &pdmac->peripherals[i];
+
+ switch (peri->rqtype) {
+ case MEMTOMEM:
+ dma_cap_set(DMA_MEMCPY, pd->cap_mask);
+ break;
+ case MEMTODEV:
+ case DEVTOMEM:
+ dma_cap_set(DMA_SLAVE, pd->cap_mask);
+ break;
+ default:
+ dev_err(&adev->dev, "DEVTODEV Not Supported\n");
+ continue;
+ }
+
+ INIT_LIST_HEAD(&pch->work_list);
+ spin_lock_init(&pch->lock);
+ pch->pl330_chid = NULL;
+ pch->chan.private = peri;
+ pch->chan.device = pd;
+ pch->chan.chan_id = i;
+ pch->dmac = pdmac;
+
+ /* Add the channel to the DMAC list */
+ pd->chancnt++;
+ list_add_tail(&pch->chan.device_node, &pd->channels);
+ }
+
+ pd->dev = &adev->dev;
+
+ pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
+ pd->device_free_chan_resources = pl330_free_chan_resources;
+ pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
+ pd->device_tx_status = pl330_tx_status;
+ pd->device_prep_slave_sg = pl330_prep_slave_sg;
+ pd->device_control = pl330_control;
+ pd->device_issue_pending = pl330_issue_pending;
+
+ ret = dma_async_device_register(pd);
+ if (ret) {
+ dev_err(&adev->dev, "unable to register DMAC\n");
+ goto probe_err4;
+ }
+
+ amba_set_drvdata(adev, pdmac);
+
+ dev_info(&adev->dev,
+ "Loaded driver for PL330 DMAC-%d\n", adev->periphid);
+ dev_info(&adev->dev,
+ "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
+ pi->pcfg.data_buf_dep,
+ pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
+ pi->pcfg.num_peri, pi->pcfg.num_events);
+
+ return 0;
+
+probe_err4:
+ pl330_del(pi);
+probe_err3:
+ free_irq(irq, pi);
+probe_err2:
+ iounmap(pi->base);
+probe_err1:
+ release_mem_region(res->start, resource_size(res));
+ kfree(pdmac);
+
+ return ret;
+}
+
+static int __devexit pl330_remove(struct amba_device *adev)
+{
+ struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
+ struct dma_pl330_chan *pch, *_p;
+ struct pl330_info *pi;
+ struct resource *res;
+ int irq;
+
+ if (!pdmac)
+ return 0;
+
+ amba_set_drvdata(adev, NULL);
+
+ /* Idle the DMAC */
+ list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
+ chan.device_node) {
+
+ /* Remove the channel */
+ list_del(&pch->chan.device_node);
+
+ /* Flush the channel */
+ pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
+ pl330_free_chan_resources(&pch->chan);
+ }
+
+ pi = &pdmac->pif;
+
+ pl330_del(pi);
+
+ irq = adev->irq[0];
+ free_irq(irq, pi);
+
+ iounmap(pi->base);
+
+ res = &adev->res;
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(pdmac);
+
+ return 0;
+}
+
+static struct amba_id pl330_ids[] = {
+ {
+ .id = 0x00041330,
+ .mask = 0x000fffff,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver pl330_driver = {
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "dma-pl330",
+ },
+ .id_table = pl330_ids,
+ .probe = pl330_probe,
+ .remove = pl330_remove,
+};
+
+static int __init pl330_init(void)
+{
+ return amba_driver_register(&pl330_driver);
+}
+module_init(pl330_init);
+
+static void __exit pl330_exit(void)
+{
+ amba_driver_unregister(&pl330_driver);
+ return;
+}
+module_exit(pl330_exit);
+
+MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
+MODULE_DESCRIPTION("API Driver for PL330 DMAC");
+MODULE_LICENSE("GPL");
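
pl330_prep_dma_memcpy() above selects the widest burst that still divides the transfer length evenly and then encodes it as the power-of-two exponent the PL330 request configuration expects. A user-space sketch of that selection; the bus width and length are example values:

#include <stdio.h>
#include <stddef.h>

static unsigned int pick_burst_bytes(size_t len, unsigned int bus_width_bytes)
{
        unsigned int burst = bus_width_bytes;

        while (burst > 1 && (len % burst))      /* halve until it divides len */
                burst /= 2;
        return burst;
}

static unsigned int burst_size_exp(unsigned int burst_bytes)
{
        unsigned int exp = 0;

        while (burst_bytes != (1u << exp))      /* brst_size is a log2 encoding */
                exp++;
        return exp;
}

int main(void)
{
        size_t len = 4100;                      /* 4 KiB + 4: not 8-byte aligned */
        unsigned int burst = pick_burst_bytes(len, 8);

        printf("burst=%u bytes, brst_size=%u\n", burst, burst_size_exp(burst));
        return 0;
}
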
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index d44626fa35ad..5a22ca6927e5 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -3935,12 +3935,13 @@ static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan)
}
/**
- * ppc440spe_adma_is_complete - poll the status of an ADMA transaction
+ * ppc440spe_adma_tx_status - poll the status of an ADMA transaction
* @chan: ADMA channel handle
* @cookie: ADMA transaction identifier
+ * @txstate: a holder for the current state of the channel
*/
-static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
+static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct ppc440spe_adma_chan *ppc440spe_chan;
dma_cookie_t last_used;
@@ -3951,10 +3952,7 @@ static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = ppc440spe_chan->completed_cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret == DMA_SUCCESS)
@@ -3965,10 +3963,7 @@ static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = ppc440spe_chan->completed_cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -4180,7 +4175,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
ppc440spe_adma_alloc_chan_resources;
adev->common.device_free_chan_resources =
ppc440spe_adma_free_chan_resources;
- adev->common.device_is_tx_complete = ppc440spe_adma_is_complete;
+ adev->common.device_tx_status = ppc440spe_adma_tx_status;
adev->common.device_issue_pending = ppc440spe_adma_issue_pending;
/* Set prep routines based on capability */
@@ -4399,7 +4394,7 @@ static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev,
static int __devinit ppc440spe_adma_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct resource res;
struct ppc440spe_adma_device *adev;
struct ppc440spe_adma_chan *chan;
@@ -4631,7 +4626,7 @@ out:
static int __devexit ppc440spe_adma_remove(struct of_device *ofdev)
{
struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev);
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct resource res;
struct dma_chan *chan, *_chan;
struct ppc_dma_chan_ref *ref, *_ref;
@@ -4949,12 +4944,12 @@ static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = {
MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
static struct of_platform_driver ppc440spe_adma_driver = {
- .match_table = ppc440spe_adma_of_match,
.probe = ppc440spe_adma_probe,
.remove = __devexit_p(ppc440spe_adma_remove),
.driver = {
.name = "PPC440SP(E)-ADMA",
.owner = THIS_MODULE,
+ .of_match_table = ppc440spe_adma_of_match,
},
};
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 323afef77802..a2a519fd2a24 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -597,12 +597,17 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
direction, flags);
}
-static void sh_dmae_terminate_all(struct dma_chan *chan)
+static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
if (!chan)
- return;
+ return -EINVAL;
dmae_halt(sh_chan);
@@ -618,6 +623,8 @@ static void sh_dmae_terminate_all(struct dma_chan *chan)
spin_unlock_bh(&sh_chan->desc_lock);
sh_dmae_chan_ld_cleanup(sh_chan, true);
+
+ return 0;
}
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
@@ -715,6 +722,10 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
while (__ld_cleanup(sh_chan, all))
;
+
+ if (all)
+ /* Terminating - forgive uncompleted cookies */
+ sh_chan->completed_cookie = sh_chan->common.cookie;
}
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
@@ -749,10 +760,9 @@ static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
sh_chan_xfer_ld_queue(sh_chan);
}
-static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
+static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
dma_cookie_t last_used;
@@ -764,12 +774,7 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = sh_chan->completed_cookie;
BUG_ON(last_complete < 0);
-
- if (done)
- *done = last_complete;
-
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
spin_lock_bh(&sh_chan->desc_lock);
@@ -1041,12 +1046,12 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
= sh_dmae_alloc_chan_resources;
shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
- shdev->common.device_is_tx_complete = sh_dmae_is_complete;
+ shdev->common.device_tx_status = sh_dmae_tx_status;
shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
/* Compulsory for DMA_SLAVE fields */
shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
- shdev->common.device_terminate_all = sh_dmae_terminate_all;
+ shdev->common.device_control = sh_dmae_control;
shdev->common.dev = &pdev->dev;
/* Default transfer size of 32 bytes requires 32-byte alignment */
@@ -1187,6 +1192,7 @@ static struct platform_driver sh_dmae_driver = {
.remove = __exit_p(sh_dmae_remove),
.shutdown = sh_dmae_shutdown,
.driver = {
+ .owner = THIS_MODULE,
.name = "sh-dma-engine",
},
};
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
new file mode 100644
index 000000000000..c426829f6ab8
--- /dev/null
+++ b/drivers/dma/ste_dma40.c
@@ -0,0 +1,2657 @@
+/*
+ * drivers/dma/ste_dma40.c
+ *
+ * Copyright (C) ST-Ericsson 2007-2010
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Per Friden <per.friden@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+
+#include <plat/ste_dma40.h>
+
+#include "ste_dma40_ll.h"
+
+#define D40_NAME "dma40"
+
+#define D40_PHY_CHAN -1
+
+/* For masking out/in 2 bit channel positions */
+#define D40_CHAN_POS(chan) (2 * (chan / 2))
+#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
+
+/* Maximum iterations taken before giving up suspending a channel */
+#define D40_SUSPEND_MAX_IT 500
+
+#define D40_ALLOC_FREE (1 << 31)
+#define D40_ALLOC_PHY (1 << 30)
+#define D40_ALLOC_LOG_FREE 0
+
+/* The number of free d40_desc to keep in memory before starting
+ * to kfree() them */
+#define D40_DESC_CACHE_SIZE 50
+
+/* Hardware designer of the block */
+#define D40_PERIPHID2_DESIGNER 0x8
+
+/**
+ * enum d40_command - The different commands and/or statuses.
+ *
+ * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
+ * @D40_DMA_RUN: The DMA channel is RUNNING or the command is RUN.
+ * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
+ * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
+ */
+enum d40_command {
+ D40_DMA_STOP = 0,
+ D40_DMA_RUN = 1,
+ D40_DMA_SUSPEND_REQ = 2,
+ D40_DMA_SUSPENDED = 3
+};
+
+/**
+ * struct d40_lli_pool - Structure for keeping LLIs in memory
+ *
+ * @base: Pointer to a memory area used when the pre_alloc_lli's are not large
+ * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
+ * pre_alloc_lli is used.
+ * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
+ * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
+ * one buffer to one buffer.
+ */
+struct d40_lli_pool {
+ void *base;
+ int size;
+ /* Space for dst and src, plus an extra for padding */
+ u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
+};
+
+/**
+ * struct d40_desc - A descriptor is one DMA job.
+ *
+ * @lli_phy: LLI settings for the physical channel. Both src and dst
+ * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
+ * lli_len equals one.
+ * @lli_log: Same as above but for logical channels.
+ * @lli_pool: The pool with two entries pre-allocated.
+ * @lli_len: Number of LLIs in lli_pool.
+ * @lli_tcount: Number of LLIs processed in the transfer. When it equals
+ * lli_len, this transfer job is done.
+ * @txd: DMA engine struct. Used, among other things, for communication
+ * during a transfer.
+ * @node: List entry.
+ * @dir: The transfer direction of this job.
+ * @is_in_client_list: true if the client owns this descriptor.
+ *
+ * This descriptor is used for both logical and physical transfers.
+ */
+
+struct d40_desc {
+ /* LLI physical */
+ struct d40_phy_lli_bidir lli_phy;
+ /* LLI logical */
+ struct d40_log_lli_bidir lli_log;
+
+ struct d40_lli_pool lli_pool;
+ u32 lli_len;
+ u32 lli_tcount;
+
+ struct dma_async_tx_descriptor txd;
+ struct list_head node;
+
+ enum dma_data_direction dir;
+ bool is_in_client_list;
+};
+
+/**
+ * struct d40_lcla_pool - LCLA pool settings and data.
+ *
+ * @base: The virtual address of LCLA.
+ * @phy: Physical base address of LCLA.
+ * @base_size: size of lcla.
+ * @lock: Lock to protect the content in this struct.
+ * @alloc_map: Mapping between physical channel and LCLA entries.
+ * @num_blocks: The number of entries of alloc_map. Equal to the
+ * number of physical channels.
+ */
+struct d40_lcla_pool {
+ void *base;
+ dma_addr_t phy;
+ resource_size_t base_size;
+ spinlock_t lock;
+ u32 *alloc_map;
+ int num_blocks;
+};
+
+/**
+ * struct d40_phy_res - struct for handling eventlines mapped to physical
+ * channels.
+ *
+ * @lock: A lock protecting this entity.
+ * @num: The physical channel number of this entity.
+ * @allocated_src: Bit map showing which src event lines are mapped to
+ * this physical channel. Can also be free or physically allocated.
+ * @allocated_dst: Same as for src but for dst.
+ * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
+ * the event line number. allocated_src and allocated_dst cannot both be
+ * allocated to a physical channel, since the interrupt handler would then
+ * have no way of figuring out which one the interrupt belongs to.
+ */
+struct d40_phy_res {
+ spinlock_t lock;
+ int num;
+ u32 allocated_src;
+ u32 allocated_dst;
+};
+
+struct d40_base;
+
+/**
+ * struct d40_chan - Struct that describes a channel.
+ *
+ * @lock: A spinlock to protect this struct.
+ * @log_num: The logical channel number, if any, of this channel.
+ * @completed: Starts at 1; after the first interrupt it is set to the dma
+ * engine's current cookie.
+ * @pending_tx: The number of pending transfers. Used between interrupt handler
+ * and tasklet.
+ * @busy: Set to true when transfer is ongoing on this channel.
+ * @phy_chan: Pointer to physical channel which this instance runs on.
+ * @chan: DMA engine handle.
+ * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
+ * transfer and call client callback.
+ * @client: Client owned descriptor list.
+ * @active: Active descriptor.
+ * @queue: Queued jobs.
+ * @free: List of free descriptors, ready to be reused.
+ * @free_len: Number of descriptors in the free list.
+ * @dma_cfg: The client configuration of this dma channel.
+ * @base: Pointer to the device instance struct.
+ * @src_def_cfg: Default cfg register setting for src.
+ * @dst_def_cfg: Default cfg register setting for dst.
+ * @log_def: Default logical channel settings.
+ * @lcla: Space for one dst src pair for logical channel transfers.
+ * @lcpa: Pointer to dst and src lcpa settings.
+ *
+ * This struct can either "be" a logical or a physical channel.
+ */
+struct d40_chan {
+ spinlock_t lock;
+ int log_num;
+ /* ID of the most recent completed transfer */
+ int completed;
+ int pending_tx;
+ bool busy;
+ struct d40_phy_res *phy_chan;
+ struct dma_chan chan;
+ struct tasklet_struct tasklet;
+ struct list_head client;
+ struct list_head active;
+ struct list_head queue;
+ struct list_head free;
+ int free_len;
+ struct stedma40_chan_cfg dma_cfg;
+ struct d40_base *base;
+ /* Default register configurations */
+ u32 src_def_cfg;
+ u32 dst_def_cfg;
+ struct d40_def_lcsp log_def;
+ struct d40_lcla_elem lcla;
+ struct d40_log_lli_full *lcpa;
+};
+
+/**
+ * struct d40_base - The big global struct, one for each probed instance.
+ *
+ * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
+ * @execmd_lock: Lock for execute command usage since several channels share
+ * the same physical register.
+ * @dev: The device structure.
+ * @virtbase: The virtual base address of the DMA's registers.
+ * @clk: Pointer to the DMA clock structure.
+ * @phy_start: Physical memory start of the DMA registers.
+ * @phy_size: Size of the DMA register map.
+ * @irq: The IRQ number.
+ * @num_phy_chans: The number of physical channels. Read from HW. This
+ * is the number of available channels for this driver, not counting "Secure
+ * mode" allocated physical channels.
+ * @num_log_chans: The number of logical channels. Calculated from
+ * num_phy_chans.
+ * @dma_both: dma_device channels that can do both memcpy and slave transfers.
+ * @dma_slave: dma_device channels that can only do slave transfers.
+ * @dma_memcpy: dma_device channels that can only do memcpy transfers.
+ * @phy_chans: Room for all possible physical channels in system.
+ * @log_chans: Room for all possible logical channels in system.
+ * @lookup_log_chans: Used to map interrupt number to logical channel. Points
+ * to log_chans entries.
+ * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
+ * to phy_chans entries.
+ * @plat_data: Pointer to provided platform_data which is the driver
+ * configuration.
+ * @phy_res: Vector containing all physical channels.
+ * @lcla_pool: lcla pool settings and data.
+ * @lcpa_base: The virtual mapped address of LCPA.
+ * @phy_lcpa: The physical address of the LCPA.
+ * @lcpa_size: The size of the LCPA area.
+ */
+struct d40_base {
+ spinlock_t interrupt_lock;
+ spinlock_t execmd_lock;
+ struct device *dev;
+ void __iomem *virtbase;
+ struct clk *clk;
+ phys_addr_t phy_start;
+ resource_size_t phy_size;
+ int irq;
+ int num_phy_chans;
+ int num_log_chans;
+ struct dma_device dma_both;
+ struct dma_device dma_slave;
+ struct dma_device dma_memcpy;
+ struct d40_chan *phy_chans;
+ struct d40_chan *log_chans;
+ struct d40_chan **lookup_log_chans;
+ struct d40_chan **lookup_phy_chans;
+ struct stedma40_platform_data *plat_data;
+ /* Physical half channels */
+ struct d40_phy_res *phy_res;
+ struct d40_lcla_pool lcla_pool;
+ void *lcpa_base;
+ dma_addr_t phy_lcpa;
+ resource_size_t lcpa_size;
+};
+
+/**
+ * struct d40_interrupt_lookup - lookup table for interrupt handler
+ *
+ * @src: Interrupt mask register.
+ * @clr: Interrupt clear register.
+ * @is_error: true if this is an error interrupt.
+ * @offset: Start delta into lookup_log_chans in d40_base. If it equals
+ * D40_PHY_CHAN, lookup_phy_chans shall be used instead.
+ */
+struct d40_interrupt_lookup {
+ u32 src;
+ u32 clr;
+ bool is_error;
+ int offset;
+};
+
+/**
+ * struct d40_reg_val - simple lookup struct
+ *
+ * @reg: The register.
+ * @val: The value that belongs to the register in reg.
+ */
+struct d40_reg_val {
+ unsigned int reg;
+ unsigned int val;
+};
+
+static int d40_pool_lli_alloc(struct d40_desc *d40d,
+ int lli_len, bool is_log)
+{
+ u32 align;
+ void *base;
+
+ if (is_log)
+ align = sizeof(struct d40_log_lli);
+ else
+ align = sizeof(struct d40_phy_lli);
+
+ if (lli_len == 1) {
+ base = d40d->lli_pool.pre_alloc_lli;
+ d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
+ d40d->lli_pool.base = NULL;
+ } else {
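+ /*
+ * Room for both a src and a dst LLI chain; the extra align bytes
+ * in the kmalloc() absorb the PTR_ALIGN() adjustment below.
+ */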
+ d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
+
+ base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
+ d40d->lli_pool.base = base;
+
+ if (d40d->lli_pool.base == NULL)
+ return -ENOMEM;
+ }
+
+ if (is_log) {
+ d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
+ align);
+ d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
+ align);
+ } else {
+ d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
+ align);
+ d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
+ align);
+
+ d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
+ d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
+ }
+
+ return 0;
+}
+
+static void d40_pool_lli_free(struct d40_desc *d40d)
+{
+ kfree(d40d->lli_pool.base);
+ d40d->lli_pool.base = NULL;
+ d40d->lli_pool.size = 0;
+ d40d->lli_log.src = NULL;
+ d40d->lli_log.dst = NULL;
+ d40d->lli_phy.src = NULL;
+ d40d->lli_phy.dst = NULL;
+ d40d->lli_phy.src_addr = 0;
+ d40d->lli_phy.dst_addr = 0;
+}
+
+static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
+ struct d40_desc *desc)
+{
+ dma_cookie_t cookie = d40c->chan.cookie;
+
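+ /* Cookies are positive; wrap back to 1 on signed overflow */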
+ if (++cookie < 0)
+ cookie = 1;
+
+ d40c->chan.cookie = cookie;
+ desc->txd.cookie = cookie;
+
+ return cookie;
+}
+
+static void d40_desc_reset(struct d40_desc *d40d)
+{
+ d40d->lli_tcount = 0;
+}
+
+static void d40_desc_remove(struct d40_desc *d40d)
+{
+ list_del(&d40d->node);
+}
+
+static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
+{
+ struct d40_desc *desc;
+ struct d40_desc *d;
+ struct d40_desc *_d;
+
+ if (!list_empty(&d40c->client)) {
+ list_for_each_entry_safe(d, _d, &d40c->client, node)
+ if (async_tx_test_ack(&d->txd)) {
+ d40_pool_lli_free(d);
+ d40_desc_remove(d);
+ desc = d;
+ goto out;
+ }
+ }
+
+ if (list_empty(&d40c->free)) {
+ /* Allocate a new desc because the free list is empty */
+ desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT);
+ if (desc == NULL)
+ goto out;
+ INIT_LIST_HEAD(&desc->node);
+ } else {
+ /* Reuse an old desc. */
+ desc = list_first_entry(&d40c->free,
+ struct d40_desc,
+ node);
+ list_del(&desc->node);
+ d40c->free_len--;
+ }
+out:
+ return desc;
+}
+
+static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+ if (d40c->free_len < D40_DESC_CACHE_SIZE) {
+ list_add_tail(&d40d->node, &d40c->free);
+ d40c->free_len++;
+ } else
+ kfree(d40d);
+}
+
+static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
+{
+ list_add_tail(&desc->node, &d40c->active);
+}
+
+static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
+{
+ struct d40_desc *d;
+
+ if (list_empty(&d40c->active))
+ return NULL;
+
+ d = list_first_entry(&d40c->active,
+ struct d40_desc,
+ node);
+ return d;
+}
+
+static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
+{
+ list_add_tail(&desc->node, &d40c->queue);
+}
+
+static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
+{
+ struct d40_desc *d;
+
+ if (list_empty(&d40c->queue))
+ return NULL;
+
+ d = list_first_entry(&d40c->queue,
+ struct d40_desc,
+ node);
+ return d;
+}
+
+/* Support functions for logical channels */
+
+static int d40_lcla_id_get(struct d40_chan *d40c,
+ struct d40_lcla_pool *pool)
+{
+ int src_id = 0;
+ int dst_id = 0;
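+ /* Each physical channel owns a 1 kB slice of the LCLA area */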
+ struct d40_log_lli *lcla_lidx_base =
+ pool->base + d40c->phy_chan->num * 1024;
+ int i;
+ int lli_per_log = d40c->base->plat_data->llis_per_log;
+
+ if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
+ return 0;
+
+ if (pool->num_blocks > 32)
+ return -EINVAL;
+
+ spin_lock(&pool->lock);
+
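+ /* Grab two free LCLA blocks: one for the src LLIs and one for the dst LLIs */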
+ for (i = 0; i < pool->num_blocks; i++) {
+ if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
+ pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
+ break;
+ }
+ }
+ src_id = i;
+ if (src_id >= pool->num_blocks)
+ goto err;
+
+ for (; i < pool->num_blocks; i++) {
+ if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
+ pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
+ break;
+ }
+ }
+
+ dst_id = i;
+ if (dst_id == src_id)
+ goto err;
+
+ d40c->lcla.src_id = src_id;
+ d40c->lcla.dst_id = dst_id;
+ d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
+ d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
+
+
+ spin_unlock(&pool->lock);
+ return 0;
+err:
+ spin_unlock(&pool->lock);
+ return -EINVAL;
+}
+
+static void d40_lcla_id_put(struct d40_chan *d40c,
+ struct d40_lcla_pool *pool,
+ int id)
+{
+ if (id < 0)
+ return;
+
+ d40c->lcla.src_id = -1;
+ d40c->lcla.dst_id = -1;
+
+ spin_lock(&pool->lock);
+ pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
+ spin_unlock(&pool->lock);
+}
+
+static int d40_channel_execute_command(struct d40_chan *d40c,
+ enum d40_command command)
+{
+ int status, i;
+ void __iomem *active_reg;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->base->execmd_lock, flags);
+
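+ /* Even-numbered physical channels are controlled via ACTIVE, odd ones via ACTIVO */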
+ if (d40c->phy_chan->num % 2 == 0)
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
+ else
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
+
+ if (command == D40_DMA_SUSPEND_REQ) {
+ status = (readl(active_reg) &
+ D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+ D40_CHAN_POS(d40c->phy_chan->num);
+
+ if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
+ goto done;
+ }
+
+ writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);
+
+ if (command == D40_DMA_SUSPEND_REQ) {
+
+ for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
+ status = (readl(active_reg) &
+ D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+ D40_CHAN_POS(d40c->phy_chan->num);
+
+ cpu_relax();
+ /*
+ * Reduce the number of bus accesses while
+ * waiting for the DMA to suspend.
+ */
+ udelay(3);
+
+ if (status == D40_DMA_STOP ||
+ status == D40_DMA_SUSPENDED)
+ break;
+ }
+
+ if (i == D40_SUSPEND_MAX_IT) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
+ __func__, d40c->phy_chan->num, d40c->log_num,
+ status);
+ dump_stack();
+ ret = -EBUSY;
+ }
+
+ }
+done:
+ spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
+ return ret;
+}
+
+static void d40_term_all(struct d40_chan *d40c)
+{
+ struct d40_desc *d40d;
+ struct d40_desc *d;
+ struct d40_desc *_d;
+
+ /* Release active descriptors */
+ while ((d40d = d40_first_active_get(d40c))) {
+ d40_desc_remove(d40d);
+
+ /* Return desc to free-list */
+ d40_desc_free(d40c, d40d);
+ }
+
+ /* Release queued descriptors waiting for transfer */
+ while ((d40d = d40_first_queued(d40c))) {
+ d40_desc_remove(d40d);
+
+ /* Return desc to free-list */
+ d40_desc_free(d40c, d40d);
+ }
+
+ /* Release client owned descriptors */
+ if (!list_empty(&d40c->client))
+ list_for_each_entry_safe(d, _d, &d40c->client, node) {
+ d40_pool_lli_free(d);
+ d40_desc_remove(d);
+ /* Return desc to free-list */
+ d40_desc_free(d40c, d);
+ }
+
+ d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
+ d40c->lcla.src_id);
+ d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
+ d40c->lcla.dst_id);
+
+ d40c->pending_tx = 0;
+ d40c->busy = false;
+}
+
+static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
+{
+ u32 val;
+ unsigned long flags;
+
+ if (do_enable)
+ val = D40_ACTIVATE_EVENTLINE;
+ else
+ val = D40_DEACTIVATE_EVENTLINE;
+
+ spin_lock_irqsave(&d40c->phy_chan->lock, flags);
+
+ /* Enable event line connected to device (or memcpy) */
+ if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
+ (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
+ u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+
+ writel((val << D40_EVENTLINE_POS(event)) |
+ ~D40_EVENTLINE_MASK(event),
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSLNK);
+ }
+ if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
+ u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
+
+ writel((val << D40_EVENTLINE_POS(event)) |
+ ~D40_EVENTLINE_MASK(event),
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK);
+ }
+
+ spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
+}
+
+static u32 d40_chan_has_events(struct d40_chan *d40c)
+{
+ u32 val = 0;
+
+ /* If SSLNK or SDLNK is zero all events are disabled */
+ if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
+ (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
+ val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSLNK);
+
+ if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
+ val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK);
+ return val;
+}
+
+static void d40_config_enable_lidx(struct d40_chan *d40c)
+{
+ /* Set LIDX for lcla */
+ writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
+ D40_SREG_ELEM_LOG_LIDX_MASK,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
+
+ writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
+ D40_SREG_ELEM_LOG_LIDX_MASK,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
+}
+
+static int d40_config_write(struct d40_chan *d40c)
+{
+ u32 addr_base;
+ u32 var;
+ int res;
+
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res)
+ return res;
+
+ /* Odd addresses are even addresses + 4 */
+ addr_base = (d40c->phy_chan->num % 2) * 4;
+ /* Setup channel mode to logical or physical */
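+ /* var becomes 1 for a physical channel and 2 for a logical channel */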
+ var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
+ D40_CHAN_POS(d40c->phy_chan->num);
+ writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
+
+ /* Setup operational mode option register */
+ var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
+ 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
+
+ writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ /* Set default config for CFG reg */
+ writel(d40c->src_def_cfg,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSCFG);
+ writel(d40c->dst_def_cfg,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDCFG);
+
+ d40_config_enable_lidx(d40c);
+ }
+ return res;
+}
+
+static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+
+ if (d40d->lli_phy.dst && d40d->lli_phy.src) {
+ d40_phy_lli_write(d40c->base->virtbase,
+ d40c->phy_chan->num,
+ d40d->lli_phy.dst,
+ d40d->lli_phy.src);
+ d40d->lli_tcount = d40d->lli_len;
+ } else if (d40d->lli_log.dst && d40d->lli_log.src) {
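+ /* Logical channel: load at most llis_per_log LLIs per round */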
+ u32 lli_len;
+ struct d40_log_lli *src = d40d->lli_log.src;
+ struct d40_log_lli *dst = d40d->lli_log.dst;
+
+ src += d40d->lli_tcount;
+ dst += d40d->lli_tcount;
+
+ if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
+ lli_len = d40d->lli_len;
+ else
+ lli_len = d40c->base->plat_data->llis_per_log;
+ d40d->lli_tcount += lli_len;
+ d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
+ d40c->lcla.dst,
+ dst, src,
+ d40c->base->plat_data->llis_per_log);
+ }
+}
+
+static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct d40_chan *d40c = container_of(tx->chan,
+ struct d40_chan,
+ chan);
+ struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ tx->cookie = d40_assign_cookie(d40c, d40d);
+
+ d40_desc_queue(d40c, d40d);
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+
+ return tx->cookie;
+}
+
+static int d40_start(struct d40_chan *d40c)
+{
+ int err;
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (err)
+ return err;
+ d40_config_set_event(d40c, true);
+ }
+
+ err = d40_channel_execute_command(d40c, D40_DMA_RUN);
+
+ return err;
+}
+
+static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
+{
+ struct d40_desc *d40d;
+ int err;
+
+ /* Start queued jobs, if any */
+ d40d = d40_first_queued(d40c);
+
+ if (d40d != NULL) {
+ d40c->busy = true;
+
+ /* Remove from queue */
+ d40_desc_remove(d40d);
+
+ /* Add to active queue */
+ d40_desc_submit(d40c, d40d);
+
+ /* Initiate DMA job */
+ d40_desc_load(d40c, d40d);
+
+ /* Start dma job */
+ err = d40_start(d40c);
+
+ if (err)
+ return NULL;
+ }
+
+ return d40d;
+}
+
+/* called from interrupt context */
+static void dma_tc_handle(struct d40_chan *d40c)
+{
+ struct d40_desc *d40d;
+
+ if (!d40c->phy_chan)
+ return;
+
+ /* Get first active entry from list */
+ d40d = d40_first_active_get(d40c);
+
+ if (d40d == NULL)
+ return;
+
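+ /* More LLIs left in this descriptor? Load the next chunk and restart */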
+ if (d40d->lli_tcount < d40d->lli_len) {
+
+ d40_desc_load(d40c, d40d);
+ /* Start dma job */
+ (void) d40_start(d40c);
+ return;
+ }
+
+ if (d40_queue_start(d40c) == NULL)
+ d40c->busy = false;
+
+ d40c->pending_tx++;
+ tasklet_schedule(&d40c->tasklet);
+
+}
+
+static void dma_tasklet(unsigned long data)
+{
+ struct d40_chan *d40c = (struct d40_chan *) data;
+ struct d40_desc *d40d_fin;
+ unsigned long flags;
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ /* Get first active entry from list */
+ d40d_fin = d40_first_active_get(d40c);
+
+ if (d40d_fin == NULL)
+ goto err;
+
+ d40c->completed = d40d_fin->txd.cookie;
+
+ /*
+ * When terminating a channel, pending_tx is set to zero.
+ * This prevents any finished active jobs from being returned to the client.
+ */
+ if (d40c->pending_tx == 0) {
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return;
+ }
+
+ /* Callback to client */
+ callback = d40d_fin->txd.callback;
+ callback_param = d40d_fin->txd.callback_param;
+
+ if (async_tx_test_ack(&d40d_fin->txd)) {
+ d40_pool_lli_free(d40d_fin);
+ d40_desc_remove(d40d_fin);
+ /* Return desc to free-list */
+ d40_desc_free(d40c, d40d_fin);
+ } else {
+ d40_desc_reset(d40d_fin);
+ if (!d40d_fin->is_in_client_list) {
+ d40_desc_remove(d40d_fin);
+ list_add_tail(&d40d_fin->node, &d40c->client);
+ d40d_fin->is_in_client_list = true;
+ }
+ }
+
+ d40c->pending_tx--;
+
+ if (d40c->pending_tx)
+ tasklet_schedule(&d40c->tasklet);
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+
+ if (callback)
+ callback(callback_param);
+
+ return;
+
+ err:
+ /* Rescue manoeuvre if receiving double interrupts */
+ if (d40c->pending_tx > 0)
+ d40c->pending_tx--;
+ spin_unlock_irqrestore(&d40c->lock, flags);
+}
+
+static irqreturn_t d40_handle_interrupt(int irq, void *data)
+{
+ static const struct d40_interrupt_lookup il[] = {
+ {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
+ {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
+ {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
+ {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
+ {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
+ {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
+ {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
+ {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
+ {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
+ {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
+ };
+
+ int i;
+ u32 regs[ARRAY_SIZE(il)];
+ u32 tmp;
+ u32 idx;
+ u32 row;
+ long chan = -1;
+ struct d40_chan *d40c;
+ unsigned long flags;
+ struct d40_base *base = data;
+
+ spin_lock_irqsave(&base->interrupt_lock, flags);
+
+ /* Read interrupt status of both logical and physical channels */
+ for (i = 0; i < ARRAY_SIZE(il); i++)
+ regs[i] = readl(base->virtbase + il[i].src);
+
+ for (;;) {
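+ /* Walk every set bit across the cached status words, treated as one bitmap */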
+
+ chan = find_next_bit((unsigned long *)regs,
+ BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
+
+ /* No more set bits found? */
+ if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
+ break;
+
+ row = chan / BITS_PER_LONG;
+ idx = chan & (BITS_PER_LONG - 1);
+
+ /* ACK interrupt */
+ tmp = readl(base->virtbase + il[row].clr);
+ tmp |= 1 << idx;
+ writel(tmp, base->virtbase + il[row].clr);
+
+ if (il[row].offset == D40_PHY_CHAN)
+ d40c = base->lookup_phy_chans[idx];
+ else
+ d40c = base->lookup_log_chans[il[row].offset + idx];
+ spin_lock(&d40c->lock);
+
+ if (!il[row].is_error)
+ dma_tc_handle(d40c);
+ else
+ dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
+ __func__, chan, il[row].offset, idx);
+
+ spin_unlock(&d40c->lock);
+ }
+
+ spin_unlock_irqrestore(&base->interrupt_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+
+static int d40_validate_conf(struct d40_chan *d40c,
+ struct stedma40_chan_cfg *conf)
+{
+ int res = 0;
+ u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
+ u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
+ bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
+ == STEDMA40_CHANNEL_IN_LOG_MODE;
+
+ if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
+ dst_event_group == STEDMA40_DEV_DST_MEMORY) {
+ dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
+ __func__);
+ res = -EINVAL;
+ }
+
+ if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
+ src_event_group == STEDMA40_DEV_SRC_MEMORY) {
+ dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
+ __func__);
+ res = -EINVAL;
+ }
+
+ if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
+ dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] No event line\n", __func__);
+ res = -EINVAL;
+ }
+
+ if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
+ (src_event_group != dst_event_group)) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Invalid event group\n", __func__);
+ res = -EINVAL;
+ }
+
+ if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
+ /*
+ * DMAC HW supports it. Will be added to this driver,
+ * in case any dma client requires it.
+ */
+ dev_err(&d40c->chan.dev->device,
+ "[%s] periph to periph not supported\n",
+ __func__);
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
+ int log_event_line, bool is_log)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&phy->lock, flags);
+ if (!is_log) {
+ /* Physical interrupts are masked per physical full channel */
+ if (phy->allocated_src == D40_ALLOC_FREE &&
+ phy->allocated_dst == D40_ALLOC_FREE) {
+ phy->allocated_dst = D40_ALLOC_PHY;
+ phy->allocated_src = D40_ALLOC_PHY;
+ goto found;
+ } else
+ goto not_found;
+ }
+
+ /* Logical channel */
+ if (is_src) {
+ if (phy->allocated_src == D40_ALLOC_PHY)
+ goto not_found;
+
+ if (phy->allocated_src == D40_ALLOC_FREE)
+ phy->allocated_src = D40_ALLOC_LOG_FREE;
+
+ if (!(phy->allocated_src & (1 << log_event_line))) {
+ phy->allocated_src |= 1 << log_event_line;
+ goto found;
+ } else
+ goto not_found;
+ } else {
+ if (phy->allocated_dst == D40_ALLOC_PHY)
+ goto not_found;
+
+ if (phy->allocated_dst == D40_ALLOC_FREE)
+ phy->allocated_dst = D40_ALLOC_LOG_FREE;
+
+ if (!(phy->allocated_dst & (1 << log_event_line))) {
+ phy->allocated_dst |= 1 << log_event_line;
+ goto found;
+ } else
+ goto not_found;
+ }
+
+not_found:
+ spin_unlock_irqrestore(&phy->lock, flags);
+ return false;
+found:
+ spin_unlock_irqrestore(&phy->lock, flags);
+ return true;
+}
+
+static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
+ int log_event_line)
+{
+ unsigned long flags;
+ bool is_free = false;
+
+ spin_lock_irqsave(&phy->lock, flags);
+ if (!log_event_line) {
+ /* Physical interrupts are masked per physical full channel */
+ phy->allocated_dst = D40_ALLOC_FREE;
+ phy->allocated_src = D40_ALLOC_FREE;
+ is_free = true;
+ goto out;
+ }
+
+ /* Logical channel */
+ if (is_src) {
+ phy->allocated_src &= ~(1 << log_event_line);
+ if (phy->allocated_src == D40_ALLOC_LOG_FREE)
+ phy->allocated_src = D40_ALLOC_FREE;
+ } else {
+ phy->allocated_dst &= ~(1 << log_event_line);
+ if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
+ phy->allocated_dst = D40_ALLOC_FREE;
+ }
+
+ is_free = ((phy->allocated_src | phy->allocated_dst) ==
+ D40_ALLOC_FREE);
+
+out:
+ spin_unlock_irqrestore(&phy->lock, flags);
+
+ return is_free;
+}
+
+static int d40_allocate_channel(struct d40_chan *d40c)
+{
+ int dev_type;
+ int event_group;
+ int event_line;
+ struct d40_phy_res *phys;
+ int i;
+ int j;
+ int log_num;
+ bool is_src;
+ bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
+ == STEDMA40_CHANNEL_IN_LOG_MODE;
+
+
+ phys = d40c->base->phy_res;
+
+ if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
+ dev_type = d40c->dma_cfg.src_dev_type;
+ log_num = 2 * dev_type;
+ is_src = true;
+ } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
+ d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+ /* dst event lines are used for logical memcpy */
+ dev_type = d40c->dma_cfg.dst_dev_type;
+ log_num = 2 * dev_type + 1;
+ is_src = false;
+ } else
+ return -EINVAL;
+
+ event_group = D40_TYPE_TO_GROUP(dev_type);
+ event_line = D40_TYPE_TO_EVENT(dev_type);
+
+ if (!is_log) {
+ if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+ /* Find physical half channel */
+ for (i = 0; i < d40c->base->num_phy_chans; i++) {
+
+ if (d40_alloc_mask_set(&phys[i], is_src,
+ 0, is_log))
+ goto found_phy;
+ }
+ } else
+ for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
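+ /* Each event group maps to two physical channels in every bank of eight */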
+ int phy_num = j + event_group * 2;
+ for (i = phy_num; i < phy_num + 2; i++) {
+ if (d40_alloc_mask_set(&phys[i], is_src,
+ 0, is_log))
+ goto found_phy;
+ }
+ }
+ return -EINVAL;
+found_phy:
+ d40c->phy_chan = &phys[i];
+ d40c->log_num = D40_PHY_CHAN;
+ goto out;
+ }
+ if (dev_type == -1)
+ return -EINVAL;
+
+ /* Find logical channel */
+ for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
+ int phy_num = j + event_group * 2;
+ /*
+ * Spread logical channels across all available physical channels
+ * rather than packing every logical channel onto the first
+ * available ones.
+ */
+ if (is_src) {
+ for (i = phy_num; i < phy_num + 2; i++) {
+ if (d40_alloc_mask_set(&phys[i], is_src,
+ event_line, is_log))
+ goto found_log;
+ }
+ } else {
+ for (i = phy_num + 1; i >= phy_num; i--) {
+ if (d40_alloc_mask_set(&phys[i], is_src,
+ event_line, is_log))
+ goto found_log;
+ }
+ }
+ }
+ return -EINVAL;
+
+found_log:
+ d40c->phy_chan = &phys[i];
+ d40c->log_num = log_num;
+out:
+
+ if (is_log)
+ d40c->base->lookup_log_chans[d40c->log_num] = d40c;
+ else
+ d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
+
+ return 0;
+
+}
+
+static int d40_config_chan(struct d40_chan *d40c,
+ struct stedma40_chan_cfg *info)
+{
+
+ /* Fill in basic CFG register values */
+ d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
+ &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ d40_log_cfg(&d40c->dma_cfg,
+ &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
+
+ if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
+ d40c->lcpa = d40c->base->lcpa_base +
+ d40c->dma_cfg.src_dev_type * 32;
+ else
+ d40c->lcpa = d40c->base->lcpa_base +
+ d40c->dma_cfg.dst_dev_type * 32 + 16;
+ }
+
+ /* Write channel configuration to the DMA */
+ return d40_config_write(d40c);
+}
+
+static int d40_config_memcpy(struct d40_chan *d40c)
+{
+ dma_cap_mask_t cap = d40c->chan.device->cap_mask;
+
+ if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
+ d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
+ d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
+ d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
+ memcpy[d40c->chan.chan_id];
+
+ } else if (dma_has_cap(DMA_MEMCPY, cap) &&
+ dma_has_cap(DMA_SLAVE, cap)) {
+ d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
+ } else {
+ dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static int d40_free_dma(struct d40_chan *d40c)
+{
+
+ int res = 0;
+ u32 event, dir;
+ struct d40_phy_res *phy = d40c->phy_chan;
+ bool is_src;
+
+ /* Terminate all queued and active transfers */
+ d40_term_all(d40c);
+
+ if (phy == NULL) {
+ dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (phy->allocated_src == D40_ALLOC_FREE &&
+ phy->allocated_dst == D40_ALLOC_FREE) {
+ dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
+ __func__);
+ return -EINVAL;
+ }
+
+
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res) {
+ dev_err(&d40c->chan.dev->device, "[%s] suspend\n",
+ __func__);
+ return res;
+ }
+
+ if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
+ d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+ event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
+ dir = D40_CHAN_REG_SDLNK;
+ is_src = false;
+ } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
+ event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+ dir = D40_CHAN_REG_SSLNK;
+ is_src = true;
+ } else {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Unknown direction\n", __func__);
+ return -EINVAL;
+ }
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ /*
+ * Release the logical channel and deactivate the event line while
+ * the physical resource is suspended.
+ */
+ writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
+ D40_EVENTLINE_MASK(event),
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ phy->num * D40_DREG_PCDELTA + dir);
+
+ d40c->base->lookup_log_chans[d40c->log_num] = NULL;
+
+ /*
+ * Check if there are more logical allocations
+ * on this phy channel.
+ */
+ if (!d40_alloc_mask_free(phy, is_src, event)) {
+ /* Resume the other logical channels if any */
+ if (d40_chan_has_events(d40c)) {
+ res = d40_channel_execute_command(d40c,
+ D40_DMA_RUN);
+ if (res) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Executing RUN command\n",
+ __func__);
+ return res;
+ }
+ }
+ return 0;
+ }
+ } else
+ d40_alloc_mask_free(phy, is_src, 0);
+
+ /* Release physical channel */
+ res = d40_channel_execute_command(d40c, D40_DMA_STOP);
+ if (res) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to stop channel\n", __func__);
+ return res;
+ }
+ d40c->phy_chan = NULL;
+ /* Invalidate channel type */
+ d40c->dma_cfg.channel_type = 0;
+ d40c->base->lookup_phy_chans[phy->num] = NULL;
+
+ return 0;
+
+
+}
+
+static int d40_pause(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int res;
+
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res == 0) {
+ if (d40c->log_num != D40_PHY_CHAN) {
+ d40_config_set_event(d40c, false);
+ /* Resume the other logical channels if any */
+ if (d40_chan_has_events(d40c))
+ res = d40_channel_execute_command(d40c,
+ D40_DMA_RUN);
+ }
+ }
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return res;
+}
+
+static bool d40_is_paused(struct d40_chan *d40c)
+{
+ bool is_paused = false;
+ unsigned long flags;
+ void __iomem *active_reg;
+ u32 status;
+ u32 event;
+ int res;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ if (d40c->log_num == D40_PHY_CHAN) {
+ if (d40c->phy_chan->num % 2 == 0)
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
+ else
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
+
+ status = (readl(active_reg) &
+ D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+ D40_CHAN_POS(d40c->phy_chan->num);
+ if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
+ is_paused = true;
+
+ goto _exit;
+ }
+
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res != 0)
+ goto _exit;
+
+ if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
+ d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
+ event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
+ else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
+ event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+ else {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Unknown direction\n", __func__);
+ goto _exit;
+ }
+ status = d40_chan_has_events(d40c);
+ status = (status & D40_EVENTLINE_MASK(event)) >>
+ D40_EVENTLINE_POS(event);
+
+ if (status != D40_DMA_RUN)
+ is_paused = true;
+
+ /* Resume the other logical channels if any */
+ if (d40_chan_has_events(d40c))
+ res = d40_channel_execute_command(d40c,
+ D40_DMA_RUN);
+
+_exit:
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return is_paused;
+
+}
+
+
+static bool d40_tx_is_linked(struct d40_chan *d40c)
+{
+ bool is_link;
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
+ else
+ is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK) &
+ D40_SREG_LNK_PHYS_LNK_MASK;
+ return is_link;
+}
+
+static u32 d40_residue(struct d40_chan *d40c)
+{
+ u32 num_elt;
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
+ >> D40_MEM_LCSP2_ECNT_POS;
+ else
+ num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDELT) &
+ D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS;
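+ /* Scale the remaining element count by the element width in bytes */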
+ return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
+}
+
+static int d40_resume(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int res = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res)
+ goto out;
+
+ /* If there are bytes left to transfer or a linked tx, resume the job */
+ if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
+ d40_config_set_event(d40c, true);
+ res = d40_channel_execute_command(d40c, D40_DMA_RUN);
+ }
+ } else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
+ res = d40_channel_execute_command(d40c, D40_DMA_RUN);
+
+out:
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return res;
+}
+
+static u32 stedma40_residue(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ u32 bytes_left;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+ bytes_left = d40_residue(d40c);
+ spin_unlock_irqrestore(&d40c->lock, flags);
+
+ return bytes_left;
+}
+
+/* Public DMA functions in addition to the DMA engine framework */
+
+int stedma40_set_psize(struct dma_chan *chan,
+ int src_psize,
+ int dst_psize)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
+ d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
+ d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
+ d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
+ goto out;
+ }
+
+ if (src_psize == STEDMA40_PSIZE_PHY_1)
+ d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
+ else {
+ d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+ d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
+ D40_SREG_CFG_PSIZE_POS);
+ d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
+ }
+
+ if (dst_psize == STEDMA40_PSIZE_PHY_1)
+ d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
+ else {
+ d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+ d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
+ D40_SREG_CFG_PSIZE_POS);
+ d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
+ }
+out:
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(stedma40_set_psize);
+
+struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
+ struct scatterlist *sgl_dst,
+ struct scatterlist *sgl_src,
+ unsigned int sgl_len,
+ unsigned long flags)
+{
+ int res;
+ struct d40_desc *d40d;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan,
+ chan);
+ unsigned long flg;
+ int lli_max = d40c->base->plat_data->llis_per_log;
+
+
+ spin_lock_irqsave(&d40c->lock, flg);
+ d40d = d40_desc_get(d40c);
+
+ if (d40d == NULL)
+ goto err;
+
+ memset(d40d, 0, sizeof(struct d40_desc));
+ d40d->lli_len = sgl_len;
+
+ d40d->txd.flags = flags;
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ if (sgl_len > 1)
+ /*
+ * Check if there is space available in lcla. If not,
+ * split list into 1-length and run only in lcpa
+ * space.
+ */
+ if (d40_lcla_id_get(d40c,
+ &d40c->base->lcla_pool) != 0)
+ lli_max = 1;
+
+ if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ goto err;
+ }
+
+ (void) d40_log_sg_to_lli(d40c->lcla.src_id,
+ sgl_src,
+ sgl_len,
+ d40d->lli_log.src,
+ d40c->log_def.lcsp1,
+ d40c->dma_cfg.src_info.data_width,
+ flags & DMA_PREP_INTERRUPT, lli_max,
+ d40c->base->plat_data->llis_per_log);
+
+ (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
+ sgl_dst,
+ sgl_len,
+ d40d->lli_log.dst,
+ d40c->log_def.lcsp3,
+ d40c->dma_cfg.dst_info.data_width,
+ flags & DMA_PREP_INTERRUPT, lli_max,
+ d40c->base->plat_data->llis_per_log);
+
+
+ } else {
+ if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ goto err;
+ }
+
+ res = d40_phy_sg_to_lli(sgl_src,
+ sgl_len,
+ 0,
+ d40d->lli_phy.src,
+ d40d->lli_phy.src_addr,
+ d40c->src_def_cfg,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.src_info.psize,
+ true);
+
+ if (res < 0)
+ goto err;
+
+ res = d40_phy_sg_to_lli(sgl_dst,
+ sgl_len,
+ 0,
+ d40d->lli_phy.dst,
+ d40d->lli_phy.dst_addr,
+ d40c->dst_def_cfg,
+ d40c->dma_cfg.dst_info.data_width,
+ d40c->dma_cfg.dst_info.psize,
+ true);
+
+ if (res < 0)
+ goto err;
+
+ (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
+ d40d->lli_pool.size, DMA_TO_DEVICE);
+ }
+
+ dma_async_tx_descriptor_init(&d40d->txd, chan);
+
+ d40d->txd.tx_submit = d40_tx_submit;
+
+ spin_unlock_irqrestore(&d40c->lock, flg);
+
+ return &d40d->txd;
+err:
+ spin_unlock_irqrestore(&d40c->lock, flg);
+ return NULL;
+}
+EXPORT_SYMBOL(stedma40_memcpy_sg);
+
+bool stedma40_filter(struct dma_chan *chan, void *data)
+{
+ struct stedma40_chan_cfg *info = data;
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int err;
+
+ if (data) {
+ err = d40_validate_conf(d40c, info);
+ if (!err)
+ d40c->dma_cfg = *info;
+ } else
+ err = d40_config_memcpy(d40c);
+
+ return err == 0;
+}
+EXPORT_SYMBOL(stedma40_filter);
+
+/* DMA ENGINE functions */
+static int d40_alloc_chan_resources(struct dma_chan *chan)
+{
+ int err;
+ unsigned long flags;
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ d40c->completed = chan->cookie = 1;
+
+ /*
+ * If no dma configuration is set (channel_type == 0)
+ * use default configuration
+ */
+ if (d40c->dma_cfg.channel_type == 0) {
+ err = d40_config_memcpy(d40c);
+ if (err)
+ goto err_alloc;
+ }
+
+ err = d40_allocate_channel(d40c);
+ if (err) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to allocate channel\n", __func__);
+ goto err_alloc;
+ }
+
+ err = d40_config_chan(d40c, &d40c->dma_cfg);
+ if (err) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to configure channel\n",
+ __func__);
+ goto err_config;
+ }
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return 0;
+
+ err_config:
+ (void) d40_free_dma(d40c);
+ err_alloc:
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Channel allocation failed\n", __func__);
+ return -EINVAL;
+}
+
+static void d40_free_chan_resources(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ err = d40_free_dma(d40c);
+
+ if (err)
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to free channel\n", __func__);
+ spin_unlock_irqrestore(&d40c->lock, flags);
+}
+
+static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
+ dma_addr_t dst,
+ dma_addr_t src,
+ size_t size,
+ unsigned long flags)
+{
+ struct d40_desc *d40d;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan,
+ chan);
+ unsigned long flg;
+ int err = 0;
+
+ spin_lock_irqsave(&d40c->lock, flg);
+ d40d = d40_desc_get(d40c);
+
+ if (d40d == NULL) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Descriptor is NULL\n", __func__);
+ goto err;
+ }
+
+ memset(d40d, 0, sizeof(struct d40_desc));
+
+ d40d->txd.flags = flags;
+
+ dma_async_tx_descriptor_init(&d40d->txd, chan);
+
+ d40d->txd.tx_submit = d40_tx_submit;
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+
+ if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ goto err;
+ }
+ d40d->lli_len = 1;
+
+ d40_log_fill_lli(d40d->lli_log.src,
+ src,
+ size,
+ 0,
+ d40c->log_def.lcsp1,
+ d40c->dma_cfg.src_info.data_width,
+ true, true);
+
+ d40_log_fill_lli(d40d->lli_log.dst,
+ dst,
+ size,
+ 0,
+ d40c->log_def.lcsp3,
+ d40c->dma_cfg.dst_info.data_width,
+ true, true);
+
+ } else {
+
+ if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ goto err;
+ }
+
+ err = d40_phy_fill_lli(d40d->lli_phy.src,
+ src,
+ size,
+ d40c->dma_cfg.src_info.psize,
+ 0,
+ d40c->src_def_cfg,
+ true,
+ d40c->dma_cfg.src_info.data_width,
+ false);
+ if (err)
+ goto err_fill_lli;
+
+ err = d40_phy_fill_lli(d40d->lli_phy.dst,
+ dst,
+ size,
+ d40c->dma_cfg.dst_info.psize,
+ 0,
+ d40c->dst_def_cfg,
+ true,
+ d40c->dma_cfg.dst_info.data_width,
+ false);
+
+ if (err)
+ goto err_fill_lli;
+
+ (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
+ d40d->lli_pool.size, DMA_TO_DEVICE);
+ }
+
+ spin_unlock_irqrestore(&d40c->lock, flg);
+ return &d40d->txd;
+
+err_fill_lli:
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed filling in PHY LLI\n", __func__);
+ d40_pool_lli_free(d40d);
+err:
+ spin_unlock_irqrestore(&d40c->lock, flg);
+ return NULL;
+}
+
+static int d40_prep_slave_sg_log(struct d40_desc *d40d,
+ struct d40_chan *d40c,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_data_direction direction,
+ unsigned long flags)
+{
+ dma_addr_t dev_addr = 0;
+ int total_size;
+ int lli_max = d40c->base->plat_data->llis_per_log;
+
+ if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ d40d->lli_len = sg_len;
+ d40d->lli_tcount = 0;
+
+ if (sg_len > 1)
+ /*
+ * Check if there is space available in lcla.
+ * If not, split list into 1-length and run only
+ * in lcpa space.
+ */
+ if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
+ lli_max = 1;
+
+ if (direction == DMA_FROM_DEVICE) {
+ dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
+ total_size = d40_log_sg_to_dev(&d40c->lcla,
+ sgl, sg_len,
+ &d40d->lli_log,
+ &d40c->log_def,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width,
+ direction,
+ flags & DMA_PREP_INTERRUPT,
+ dev_addr, lli_max,
+ d40c->base->plat_data->llis_per_log);
+ } else if (direction == DMA_TO_DEVICE) {
+ dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
+ total_size = d40_log_sg_to_dev(&d40c->lcla,
+ sgl, sg_len,
+ &d40d->lli_log,
+ &d40c->log_def,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width,
+ direction,
+ flags & DMA_PREP_INTERRUPT,
+ dev_addr, lli_max,
+ d40c->base->plat_data->llis_per_log);
+ } else
+ return -EINVAL;
+ if (total_size < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
+ struct d40_chan *d40c,
+ struct scatterlist *sgl,
+ unsigned int sgl_len,
+ enum dma_data_direction direction,
+ unsigned long flags)
+{
+ dma_addr_t src_dev_addr;
+ dma_addr_t dst_dev_addr;
+ int res;
+
+ if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ d40d->lli_len = sgl_len;
+ d40d->lli_tcount = 0;
+
+ if (direction == DMA_FROM_DEVICE) {
+ dst_dev_addr = 0;
+ src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
+ } else if (direction == DMA_TO_DEVICE) {
+ dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
+ src_dev_addr = 0;
+ } else
+ return -EINVAL;
+
+ res = d40_phy_sg_to_lli(sgl,
+ sgl_len,
+ src_dev_addr,
+ d40d->lli_phy.src,
+ d40d->lli_phy.src_addr,
+ d40c->src_def_cfg,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.src_info.psize,
+ true);
+ if (res < 0)
+ return res;
+
+ res = d40_phy_sg_to_lli(sgl,
+ sgl_len,
+ dst_dev_addr,
+ d40d->lli_phy.dst,
+ d40d->lli_phy.dst_addr,
+ d40c->dst_def_cfg,
+ d40c->dma_cfg.dst_info.data_width,
+ d40c->dma_cfg.dst_info.psize,
+ true);
+ if (res < 0)
+ return res;
+
+ (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
+ d40d->lli_pool.size, DMA_TO_DEVICE);
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct d40_desc *d40d;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan,
+ chan);
+ unsigned long flg;
+ int err;
+
+ if (d40c->dma_cfg.pre_transfer)
+ d40c->dma_cfg.pre_transfer(chan,
+ d40c->dma_cfg.pre_transfer_data,
+ sg_dma_len(sgl));
+
+ spin_lock_irqsave(&d40c->lock, flg);
+ d40d = d40_desc_get(d40c);
+ spin_unlock_irqrestore(&d40c->lock, flg);
+
+ if (d40d == NULL)
+ return NULL;
+
+ memset(d40d, 0, sizeof(struct d40_desc));
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
+ direction, flags);
+ else
+ err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
+ direction, flags);
+ if (err) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to prepare %s slave sg job: %d\n",
+ __func__,
+ d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
+ return NULL;
+ }
+
+ d40d->txd.flags = flags;
+
+ dma_async_tx_descriptor_init(&d40d->txd, chan);
+
+ d40d->txd.tx_submit = d40_tx_submit;
+
+ return &d40d->txd;
+}
+
+static enum dma_status d40_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ int ret;
+
+ last_complete = d40c->completed;
+ last_used = chan->cookie;
+
+ if (d40_is_paused(d40c))
+ ret = DMA_PAUSED;
+ else
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+
+ dma_set_tx_state(txstate, last_complete, last_used,
+ stedma40_residue(chan));
+
+ return ret;
+}
+
+static void d40_issue_pending(struct dma_chan *chan)
+{
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ /* Busy means that pending jobs are already being processed */
+ if (!d40c->busy)
+ (void) d40_queue_start(d40c);
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+}
+
+static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ unsigned long flags;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ spin_lock_irqsave(&d40c->lock, flags);
+ d40_term_all(d40c);
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return 0;
+ case DMA_PAUSE:
+ return d40_pause(chan);
+ case DMA_RESUME:
+ return d40_resume(chan);
+ }
+
+ /* Other commands are unimplemented */
+ return -ENXIO;
+}
+
+/* Initialization functions */
+
+static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
+ struct d40_chan *chans, int offset,
+ int num_chans)
+{
+ int i = 0;
+ struct d40_chan *d40c;
+
+ INIT_LIST_HEAD(&dma->channels);
+
+ for (i = offset; i < offset + num_chans; i++) {
+ d40c = &chans[i];
+ d40c->base = base;
+ d40c->chan.device = dma;
+
+ /* Invalidate lcla element */
+ d40c->lcla.src_id = -1;
+ d40c->lcla.dst_id = -1;
+
+ spin_lock_init(&d40c->lock);
+
+ d40c->log_num = D40_PHY_CHAN;
+
+ INIT_LIST_HEAD(&d40c->free);
+ INIT_LIST_HEAD(&d40c->active);
+ INIT_LIST_HEAD(&d40c->queue);
+ INIT_LIST_HEAD(&d40c->client);
+
+ d40c->free_len = 0;
+
+ tasklet_init(&d40c->tasklet, dma_tasklet,
+ (unsigned long) d40c);
+
+ list_add_tail(&d40c->chan.device_node,
+ &dma->channels);
+ }
+}
+
+static int __init d40_dmaengine_init(struct d40_base *base,
+ int num_reserved_chans)
+{
+ int err;
+
+ d40_chan_init(base, &base->dma_slave, base->log_chans,
+ 0, base->num_log_chans);
+
+ dma_cap_zero(base->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
+
+ base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
+ base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
+ base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
+ base->dma_slave.device_tx_status = d40_tx_status;
+ base->dma_slave.device_issue_pending = d40_issue_pending;
+ base->dma_slave.device_control = d40_control;
+ base->dma_slave.dev = base->dev;
+
+ err = dma_async_device_register(&base->dma_slave);
+
+ if (err) {
+ dev_err(base->dev,
+ "[%s] Failed to register slave channels\n",
+ __func__);
+ goto failure1;
+ }
+
+ d40_chan_init(base, &base->dma_memcpy, base->log_chans,
+ base->num_log_chans, base->plat_data->memcpy_len);
+
+ dma_cap_zero(base->dma_memcpy.cap_mask);
+ dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
+
+ base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
+ base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
+ base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
+ base->dma_memcpy.device_tx_status = d40_tx_status;
+ base->dma_memcpy.device_issue_pending = d40_issue_pending;
+ base->dma_memcpy.device_control = d40_control;
+ base->dma_memcpy.dev = base->dev;
+ /*
+ * This controller can only access addresses at even
+ * 32-bit boundaries, i.e. 2^2.
+ */
+ base->dma_memcpy.copy_align = 2;
+
+ err = dma_async_device_register(&base->dma_memcpy);
+
+ if (err) {
+ dev_err(base->dev,
+ "[%s] Failed to regsiter memcpy only channels\n",
+ __func__);
+ goto failure2;
+ }
+
+ d40_chan_init(base, &base->dma_both, base->phy_chans,
+ 0, num_reserved_chans);
+
+ dma_cap_zero(base->dma_both.cap_mask);
+ dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
+ dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
+
+ base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
+ base->dma_both.device_free_chan_resources = d40_free_chan_resources;
+ base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
+ base->dma_both.device_tx_status = d40_tx_status;
+ base->dma_both.device_issue_pending = d40_issue_pending;
+ base->dma_both.device_control = d40_control;
+ base->dma_both.dev = base->dev;
+ base->dma_both.copy_align = 2;
+ err = dma_async_device_register(&base->dma_both);
+
+ if (err) {
+ dev_err(base->dev,
+ "[%s] Failed to register logical and physical capable channels\n",
+ __func__);
+ goto failure3;
+ }
+ return 0;
+failure3:
+ dma_async_device_unregister(&base->dma_memcpy);
+failure2:
+ dma_async_device_unregister(&base->dma_slave);
+failure1:
+ return err;
+}
+
+/* Initialization functions. */
+
+static int __init d40_phy_res_init(struct d40_base *base)
+{
+ int i;
+ int num_phy_chans_avail = 0;
+ u32 val[2];
+ int odd_even_bit = -2;
+
+ val[0] = readl(base->virtbase + D40_DREG_PRSME);
+ val[1] = readl(base->virtbase + D40_DREG_PRSMO);
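+ /* PRSME covers even channels, PRSMO odd ones; two mode bits per channel */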
+
+ for (i = 0; i < base->num_phy_chans; i++) {
+ base->phy_res[i].num = i;
+ odd_even_bit += 2 * ((i % 2) == 0);
+ if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
+ /* Mark security only channels as occupied */
+ base->phy_res[i].allocated_src = D40_ALLOC_PHY;
+ base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
+ } else {
+ base->phy_res[i].allocated_src = D40_ALLOC_FREE;
+ base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
+ num_phy_chans_avail++;
+ }
+ spin_lock_init(&base->phy_res[i].lock);
+ }
+ dev_info(base->dev, "%d of %d physical DMA channels available\n",
+ num_phy_chans_avail, base->num_phy_chans);
+
+ /* Verify settings extended vs standard */
+ val[0] = readl(base->virtbase + D40_DREG_PRTYP);
+
+ for (i = 0; i < base->num_phy_chans; i++) {
+
+ if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
+ (val[0] & 0x3) != 1)
+ dev_info(base->dev,
+ "[%s] INFO: channel %d is misconfigured (%d)\n",
+ __func__, i, val[0] & 0x3);
+
+ val[0] = val[0] >> 2;
+ }
+
+ return num_phy_chans_avail;
+}
+
+static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
+{
+ static const struct d40_reg_val dma_id_regs[] = {
+ /* Peripheral Id */
+ { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
+ { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
+ /*
+ * D40_DREG_PERIPHID2 Depends on HW revision:
+ * MOP500/HREF ED has 0x0008,
+ * ? has 0x0018,
+ * HREF V1 has 0x0028
+ */
+ { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
+
+ /* PCell Id */
+ { .reg = D40_DREG_CELLID0, .val = 0x000d},
+ { .reg = D40_DREG_CELLID1, .val = 0x00f0},
+ { .reg = D40_DREG_CELLID2, .val = 0x0005},
+ { .reg = D40_DREG_CELLID3, .val = 0x00b1}
+ };
+ struct stedma40_platform_data *plat_data;
+ struct clk *clk = NULL;
+ void __iomem *virtbase = NULL;
+ struct resource *res = NULL;
+ struct d40_base *base = NULL;
+ int num_log_chans = 0;
+ int num_phy_chans;
+ int i;
+
+ clk = clk_get(&pdev->dev, NULL);
+
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "[%s] No matching clock found\n",
+ __func__);
+ goto failure;
+ }
+
+ clk_enable(clk);
+
+ /* Get IO for DMAC base address */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+ if (!res)
+ goto failure;
+
+ if (request_mem_region(res->start, resource_size(res),
+ D40_NAME " I/O base") == NULL)
+ goto failure;
+
+ virtbase = ioremap(res->start, resource_size(res));
+ if (!virtbase)
+ goto failure;
+
+ /* HW version check */
+ for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
+ if (dma_id_regs[i].val !=
+ readl(virtbase + dma_id_regs[i].reg)) {
+ dev_err(&pdev->dev,
+ "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
+ __func__,
+ dma_id_regs[i].val,
+ dma_id_regs[i].reg,
+ readl(virtbase + dma_id_regs[i].reg));
+ goto failure;
+ }
+ }
+
+ i = readl(virtbase + D40_DREG_PERIPHID2);
+
+ if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
+ dev_err(&pdev->dev,
+ "[%s] Unknown designer! Got %x wanted %x\n",
+ __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
+ goto failure;
+ }
+
+ /* The number of physical channels on this HW */
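+ /* ICFG bits [2:0] encode the count in steps of four: 4, 8, ..., 32 */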
+ num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
+
+ dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
+ (i >> 4) & 0xf, res->start);
+
+ plat_data = pdev->dev.platform_data;
+
+ /* Count the number of logical channels in use */
+ for (i = 0; i < plat_data->dev_len; i++)
+ if (plat_data->dev_rx[i] != 0)
+ num_log_chans++;
+
+ for (i = 0; i < plat_data->dev_len; i++)
+ if (plat_data->dev_tx[i] != 0)
+ num_log_chans++;
+
+ base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
+ (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
+ sizeof(struct d40_chan), GFP_KERNEL);
+
+ if (base == NULL) {
+ dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
+ goto failure;
+ }
+
+ base->clk = clk;
+ base->num_phy_chans = num_phy_chans;
+ base->num_log_chans = num_log_chans;
+ base->phy_start = res->start;
+ base->phy_size = resource_size(res);
+ base->virtbase = virtbase;
+ base->plat_data = plat_data;
+ base->dev = &pdev->dev;
+ base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
+ base->log_chans = &base->phy_chans[num_phy_chans];
+
+ base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
+ GFP_KERNEL);
+ if (!base->phy_res)
+ goto failure;
+
+ base->lookup_phy_chans = kzalloc(num_phy_chans *
+ sizeof(struct d40_chan *),
+ GFP_KERNEL);
+ if (!base->lookup_phy_chans)
+ goto failure;
+
+ if (num_log_chans + plat_data->memcpy_len) {
+ /*
+ * The max number of logical channels equals the number of event
+ * lines for all src and dst devices
+ */
+ base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
+ sizeof(struct d40_chan *),
+ GFP_KERNEL);
+ if (!base->lookup_log_chans)
+ goto failure;
+ }
+ base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
+ GFP_KERNEL);
+ if (!base->lcla_pool.alloc_map)
+ goto failure;
+
+ return base;
+
+failure:
+ if (clk) {
+ clk_disable(clk);
+ clk_put(clk);
+ }
+ if (virtbase)
+ iounmap(virtbase);
+ if (res)
+ release_mem_region(res->start,
+ resource_size(res));
+
+ if (base) {
+ kfree(base->lcla_pool.alloc_map);
+ kfree(base->lookup_log_chans);
+ kfree(base->lookup_phy_chans);
+ kfree(base->phy_res);
+ kfree(base);
+ }
+
+ return NULL;
+}
+
+static void __init d40_hw_init(struct d40_base *base)
+{
+
+ static const struct d40_reg_val dma_init_reg[] = {
+ /* Clock every part of the DMA block from start */
+ { .reg = D40_DREG_GCC, .val = 0x0000ff01},
+
+ /* Interrupts on all logical channels */
+ { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
+ };
+ int i;
+ u32 prmseo[2] = {0, 0};
+ u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
+ u32 pcmis = 0;
+ u32 pcicr = 0;
+
+ for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
+ writel(dma_init_reg[i].val,
+ base->virtbase + dma_init_reg[i].reg);
+
+ /* Configure all our dma channels to default settings */
+ for (i = 0; i < base->num_phy_chans; i++) {
+
+ activeo[i % 2] = activeo[i % 2] << 2;
+
+ if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
+ == D40_ALLOC_PHY) {
+ activeo[i % 2] |= 3;
+ continue;
+ }
+
+ /* Enable interrupt # */
+ pcmis = (pcmis << 1) | 1;
+
+ /* Clear interrupt # */
+ pcicr = (pcicr << 1) | 1;
+
+ /* Set channel to physical mode */
+ prmseo[i % 2] = prmseo[i % 2] << 2;
+ prmseo[i % 2] |= 1;
+
+ }
+
+ writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
+ writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
+ writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
+ writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
+
+ /* Write which interrupt to enable */
+ writel(pcmis, base->virtbase + D40_DREG_PCMIS);
+
+ /* Write which interrupt to clear */
+ writel(pcicr, base->virtbase + D40_DREG_PCICR);
+
+}
+
+static int __init d40_probe(struct platform_device *pdev)
+{
+ int err;
+ int ret = -ENOENT;
+ struct d40_base *base;
+ struct resource *res = NULL;
+ int num_reserved_chans;
+ u32 val;
+
+ base = d40_hw_detect_init(pdev);
+
+ if (!base)
+ goto failure;
+
+ num_reserved_chans = d40_phy_res_init(base);
+
+ platform_set_drvdata(pdev, base);
+
+ spin_lock_init(&base->interrupt_lock);
+ spin_lock_init(&base->execmd_lock);
+
+ /* Get IO for logical channel parameter address */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
+ if (!res) {
+ ret = -ENOENT;
+ dev_err(&pdev->dev,
+ "[%s] No \"lcpa\" memory resource\n",
+ __func__);
+ goto failure;
+ }
+ base->lcpa_size = resource_size(res);
+ base->phy_lcpa = res->start;
+
+ if (request_mem_region(res->start, resource_size(res),
+ D40_NAME " I/O lcpa") == NULL) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev,
+ "[%s] Failed to request LCPA region 0x%x-0x%x\n",
+ __func__, res->start, res->end);
+ goto failure;
+ }
+
+ /* We make use of ESRAM memory for this. */
+ val = readl(base->virtbase + D40_DREG_LCPA);
+ if (res->start != val && val != 0) {
+ dev_warn(&pdev->dev,
+ "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
+ __func__, val, res->start);
+ } else
+ writel(res->start, base->virtbase + D40_DREG_LCPA);
+
+ base->lcpa_base = ioremap(res->start, resource_size(res));
+ if (!base->lcpa_base) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev,
+ "[%s] Failed to ioremap LCPA region\n",
+ __func__);
+ goto failure;
+ }
+ /* Get IO for logical channel link address */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
+ if (!res) {
+ ret = -ENOENT;
+ dev_err(&pdev->dev,
+ "[%s] No \"lcla\" resource defined\n",
+ __func__);
+ goto failure;
+ }
+
+ base->lcla_pool.base_size = resource_size(res);
+ base->lcla_pool.phy = res->start;
+
+ if (request_mem_region(res->start, resource_size(res),
+ D40_NAME " I/O lcla") == NULL) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev,
+ "[%s] Failed to request LCLA region 0x%x-0x%x\n",
+ __func__, res->start, res->end);
+ goto failure;
+ }
+ val = readl(base->virtbase + D40_DREG_LCLA);
+ if (res->start != val && val != 0) {
+ dev_warn(&pdev->dev,
+ "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
+ __func__, val, res->start);
+ } else
+ writel(res->start, base->virtbase + D40_DREG_LCLA);
+
+ base->lcla_pool.base = ioremap(res->start, resource_size(res));
+ if (!base->lcla_pool.base) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev,
+ "[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
+ __func__, res->start, res->end);
+ goto failure;
+ }
+
+ spin_lock_init(&base->lcla_pool.lock);
+
+ base->lcla_pool.num_blocks = base->num_phy_chans;
+
+ base->irq = platform_get_irq(pdev, 0);
+
+ ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
+
+ if (ret) {
+ dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
+ goto failure;
+ }
+
+ err = d40_dmaengine_init(base, num_reserved_chans);
+ if (err)
+ goto failure;
+
+ d40_hw_init(base);
+
+ dev_info(base->dev, "initialized\n");
+ return 0;
+
+failure:
+ if (base) {
+ if (base->virtbase)
+ iounmap(base->virtbase);
+ if (base->lcla_pool.phy)
+ release_mem_region(base->lcla_pool.phy,
+ base->lcla_pool.base_size);
+ if (base->phy_lcpa)
+ release_mem_region(base->phy_lcpa,
+ base->lcpa_size);
+ if (base->phy_start)
+ release_mem_region(base->phy_start,
+ base->phy_size);
+ if (base->clk) {
+ clk_disable(base->clk);
+ clk_put(base->clk);
+ }
+
+ kfree(base->lcla_pool.alloc_map);
+ kfree(base->lookup_log_chans);
+ kfree(base->lookup_phy_chans);
+ kfree(base->phy_res);
+ kfree(base);
+ }
+
+ dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
+ return ret;
+}
+
+static struct platform_driver d40_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = D40_NAME,
+ },
+};
+
+int __init stedma40_init(void)
+{
+ return platform_driver_probe(&d40_driver, d40_probe);
+}
+arch_initcall(stedma40_init);
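The probe path above infers the controller geometry from two read-only registers: PERIPHID2 carries the designer id and hardware revision, and ICFG encodes the physical channel count. A minimal sketch of that decoding follows; it is illustrative only, the helper names are invented and the bit positions are taken from d40_hw_detect_init() above.

/*
 * Illustrative sketch, not part of the submitted driver: mirrors the
 * field extraction done inline in d40_hw_detect_init().
 */
static inline int d40_sketch_num_phy_chans(u32 icfg)
{
        /* ICFG bits [2:0] give the channel count in steps of four */
        return 4 * (icfg & 0x7) + 4;
}

static inline int d40_sketch_hw_revision(u32 periphid2)
{
        /* bits [3:0] hold the designer id, bits [7:4] the HW revision */
        return (periphid2 >> 4) & 0xf;
}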
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
new file mode 100644
index 000000000000..561fdd8a80c1
--- /dev/null
+++ b/drivers/dma/ste_dma40_ll.c
@@ -0,0 +1,454 @@
+/*
+ * drivers/dma/ste_dma40_ll.c
+ *
+ * Copyright (C) ST-Ericsson 2007-2010
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Per Friden <per.friden@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+
+#include <linux/kernel.h>
+#include <plat/ste_dma40.h>
+
+#include "ste_dma40_ll.h"
+
+/* Sets up proper LCSP1 and LCSP3 register for a logical channel */
+void d40_log_cfg(struct stedma40_chan_cfg *cfg,
+ u32 *lcsp1, u32 *lcsp3)
+{
+ u32 l3 = 0; /* dst */
+ u32 l1 = 0; /* src */
+
+ /* src is mem? -> increase address pos */
+ if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
+ cfg->dir == STEDMA40_MEM_TO_MEM)
+ l1 |= 1 << D40_MEM_LCSP1_SCFG_INCR_POS;
+
+ /* dst is mem? -> increase address pos */
+ if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
+ cfg->dir == STEDMA40_MEM_TO_MEM)
+ l3 |= 1 << D40_MEM_LCSP3_DCFG_INCR_POS;
+
+ /* src is hw? -> master port 1 */
+ if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
+ cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
+ l1 |= 1 << D40_MEM_LCSP1_SCFG_MST_POS;
+
+ /* dst is hw? -> master port 1 */
+ if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
+ cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
+ l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS;
+
+ l3 |= 1 << D40_MEM_LCSP3_DCFG_TIM_POS;
+ l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS;
+ l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
+ l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS;
+ l3 |= 1 << D40_MEM_LCSP3_DTCP_POS;
+
+ l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS;
+ l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
+ l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS;
+ l1 |= 1 << D40_MEM_LCSP1_STCP_POS;
+
+ *lcsp1 = l1;
+ *lcsp3 = l3;
+
+}
+
+/* Sets up SRC and DST CFG register for both logical and physical channels */
+void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
+ u32 *src_cfg, u32 *dst_cfg, bool is_log)
+{
+ u32 src = 0;
+ u32 dst = 0;
+
+ if (!is_log) {
+ /* Physical channel */
+ if ((cfg->dir == STEDMA40_PERIPH_TO_MEM) ||
+ (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
+ /* Set master port to 1 */
+ src |= 1 << D40_SREG_CFG_MST_POS;
+ src |= D40_TYPE_TO_EVENT(cfg->src_dev_type);
+
+ if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
+ src |= 1 << D40_SREG_CFG_PHY_TM_POS;
+ else
+ src |= 3 << D40_SREG_CFG_PHY_TM_POS;
+ }
+ if ((cfg->dir == STEDMA40_MEM_TO_PERIPH) ||
+ (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
+ /* Set master port to 1 */
+ dst |= 1 << D40_SREG_CFG_MST_POS;
+ dst |= D40_TYPE_TO_EVENT(cfg->dst_dev_type);
+
+ if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
+ dst |= 1 << D40_SREG_CFG_PHY_TM_POS;
+ else
+ dst |= 3 << D40_SREG_CFG_PHY_TM_POS;
+ }
+ /* Interrupt on end of transfer for destination */
+ dst |= 1 << D40_SREG_CFG_TIM_POS;
+
+ /* Generate interrupt on error */
+ src |= 1 << D40_SREG_CFG_EIM_POS;
+ dst |= 1 << D40_SREG_CFG_EIM_POS;
+
+ /* PSIZE */
+ if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) {
+ src |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+ src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS;
+ }
+ if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) {
+ dst |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+ dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS;
+ }
+
+ /* Element size */
+ src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
+ dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
+
+ } else {
+ /* Logical channel */
+ dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
+ src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
+ }
+
+ if (cfg->channel_type & STEDMA40_HIGH_PRIORITY_CHANNEL) {
+ src |= 1 << D40_SREG_CFG_PRI_POS;
+ dst |= 1 << D40_SREG_CFG_PRI_POS;
+ }
+
+ src |= cfg->src_info.endianess << D40_SREG_CFG_LBE_POS;
+ dst |= cfg->dst_info.endianess << D40_SREG_CFG_LBE_POS;
+
+ *src_cfg = src;
+ *dst_cfg = dst;
+}
+
+int d40_phy_fill_lli(struct d40_phy_lli *lli,
+ dma_addr_t data,
+ u32 data_size,
+ int psize,
+ dma_addr_t next_lli,
+ u32 reg_cfg,
+ bool term_int,
+ u32 data_width,
+ bool is_device)
+{
+ int num_elems;
+
+ if (psize == STEDMA40_PSIZE_PHY_1)
+ num_elems = 1;
+ else
+ num_elems = 2 << psize;
+
+ /*
+ * Size is 16 bit. data_width is 8, 16, 32 or 64 bit.
+ * Blocks larger than 64 KiB must be split.
+ */
+ if (data_size > (0xffff << data_width))
+ return -EINVAL;
+
+ /* Must be aligned */
+ if (!IS_ALIGNED(data, 0x1 << data_width))
+ return -EINVAL;
+
+ /* Transfer size can't be smaller than (num_elms * elem_size) */
+ if (data_size < num_elems * (0x1 << data_width))
+ return -EINVAL;
+
+ /* The number of elements, i.e. how many chunks */
+ lli->reg_elt = (data_size >> data_width) << D40_SREG_ELEM_PHY_ECNT_POS;
+
+ /*
+ * Distance to next element sized entry.
+ * Usually the size of the element unless you want gaps.
+ */
+ if (!is_device)
+ lli->reg_elt |= (0x1 << data_width) <<
+ D40_SREG_ELEM_PHY_EIDX_POS;
+
+ /* Where the data is */
+ lli->reg_ptr = data;
+ lli->reg_cfg = reg_cfg;
+
+ /* If this scatter list entry is the last one, no next link */
+ if (next_lli == 0)
+ lli->reg_lnk = 0x1 << D40_SREG_LNK_PHY_TCP_POS;
+ else
+ lli->reg_lnk = next_lli;
+
+ /* Set/clear interrupt generation on this link item.*/
+ if (term_int)
+ lli->reg_cfg |= 0x1 << D40_SREG_CFG_TIM_POS;
+ else
+ lli->reg_cfg &= ~(0x1 << D40_SREG_CFG_TIM_POS);
+
+ /* Post link */
+ lli->reg_lnk |= 0 << D40_SREG_LNK_PHY_PRE_POS;
+
+ return 0;
+}
+
+int d40_phy_sg_to_lli(struct scatterlist *sg,
+ int sg_len,
+ dma_addr_t target,
+ struct d40_phy_lli *lli,
+ dma_addr_t lli_phys,
+ u32 reg_cfg,
+ u32 data_width,
+ int psize,
+ bool term_int)
+{
+ int total_size = 0;
+ int i;
+ struct scatterlist *current_sg = sg;
+ dma_addr_t next_lli_phys;
+ dma_addr_t dst;
+ int err = 0;
+
+ for_each_sg(sg, current_sg, sg_len, i) {
+
+ total_size += sg_dma_len(current_sg);
+
+ /* If this scatter list entry is the last one, no next link */
+ if (sg_len - 1 == i)
+ next_lli_phys = 0;
+ else
+ next_lli_phys = ALIGN(lli_phys + (i + 1) *
+ sizeof(struct d40_phy_lli),
+ D40_LLI_ALIGN);
+
+ if (target)
+ dst = target;
+ else
+ dst = sg_phys(current_sg);
+
+ err = d40_phy_fill_lli(&lli[i],
+ dst,
+ sg_dma_len(current_sg),
+ psize,
+ next_lli_phys,
+ reg_cfg,
+ !next_lli_phys,
+ data_width,
+ target == dst);
+ if (err)
+ goto err;
+ }
+
+ return total_size;
+ err:
+ return err;
+}
+
+
+void d40_phy_lli_write(void __iomem *virtbase,
+ u32 phy_chan_num,
+ struct d40_phy_lli *lli_dst,
+ struct d40_phy_lli *lli_src)
+{
+
+ writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG);
+ writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
+ writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR);
+ writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK);
+
+ writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG);
+ writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
+ writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR);
+ writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK);
+
+}
+
+/* DMA logical lli operations */
+
+void d40_log_fill_lli(struct d40_log_lli *lli,
+ dma_addr_t data, u32 data_size,
+ u32 lli_next_off, u32 reg_cfg,
+ u32 data_width,
+ bool term_int, bool addr_inc)
+{
+ lli->lcsp13 = reg_cfg;
+
+ /* The number of elements to transfer */
+ lli->lcsp02 = ((data_size >> data_width) <<
+ D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
+ /* 16 LSBs address of the current element */
+ lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
+ /* 16 MSBs address of the current element */
+ lli->lcsp13 |= data & D40_MEM_LCSP1_SPTR_MASK;
+
+ if (addr_inc)
+ lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;
+
+ lli->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
+ /* If this scatter list entry is the last one, no next link */
+ lli->lcsp13 |= (lli_next_off << D40_MEM_LCSP1_SLOS_POS) &
+ D40_MEM_LCSP1_SLOS_MASK;
+
+ if (term_int)
+ lli->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
+ else
+ lli->lcsp13 &= ~D40_MEM_LCSP1_SCFG_TIM_MASK;
+}
+
+int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
+ struct scatterlist *sg,
+ int sg_len,
+ struct d40_log_lli_bidir *lli,
+ struct d40_def_lcsp *lcsp,
+ u32 src_data_width,
+ u32 dst_data_width,
+ enum dma_data_direction direction,
+ bool term_int, dma_addr_t dev_addr, int max_len,
+ int llis_per_log)
+{
+ int total_size = 0;
+ struct scatterlist *current_sg = sg;
+ int i;
+ u32 next_lli_off_dst;
+ u32 next_lli_off_src;
+
+ next_lli_off_src = 0;
+ next_lli_off_dst = 0;
+
+ for_each_sg(sg, current_sg, sg_len, i) {
+ total_size += sg_dma_len(current_sg);
+
+ /*
+ * If this scatter list entry is the last one or
+ * max length, terminate link.
+ */
+ if (sg_len - 1 == i || ((i+1) % max_len == 0)) {
+ next_lli_off_src = 0;
+ next_lli_off_dst = 0;
+ } else {
+ if (next_lli_off_dst == 0 &&
+ next_lli_off_src == 0) {
+ /* The first lli will be at next_lli_off */
+ next_lli_off_dst = (lcla->dst_id *
+ llis_per_log + 1);
+ next_lli_off_src = (lcla->src_id *
+ llis_per_log + 1);
+ } else {
+ next_lli_off_dst++;
+ next_lli_off_src++;
+ }
+ }
+
+ if (direction == DMA_TO_DEVICE) {
+ d40_log_fill_lli(&lli->src[i],
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ next_lli_off_src,
+ lcsp->lcsp1, src_data_width,
+ term_int && !next_lli_off_src,
+ true);
+ d40_log_fill_lli(&lli->dst[i],
+ dev_addr,
+ sg_dma_len(current_sg),
+ next_lli_off_dst,
+ lcsp->lcsp3, dst_data_width,
+ /* No next == terminal interrupt */
+ term_int && !next_lli_off_dst,
+ false);
+ } else {
+ d40_log_fill_lli(&lli->dst[i],
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ next_lli_off_dst,
+ lcsp->lcsp3, dst_data_width,
+ /* No next == terminal interrupt */
+ term_int && !next_lli_off_dst,
+ true);
+ d40_log_fill_lli(&lli->src[i],
+ dev_addr,
+ sg_dma_len(current_sg),
+ next_lli_off_src,
+ lcsp->lcsp1, src_data_width,
+ term_int && !next_lli_off_src,
+ false);
+ }
+ }
+ return total_size;
+}
+
+int d40_log_sg_to_lli(int lcla_id,
+ struct scatterlist *sg,
+ int sg_len,
+ struct d40_log_lli *lli_sg,
+ u32 lcsp13, /* src or dst*/
+ u32 data_width,
+ bool term_int, int max_len, int llis_per_log)
+{
+ int total_size = 0;
+ struct scatterlist *current_sg = sg;
+ int i;
+ u32 next_lli_off = 0;
+
+ for_each_sg(sg, current_sg, sg_len, i) {
+ total_size += sg_dma_len(current_sg);
+
+ /*
+ * If this scatter list entry is the last one or
+ * max length, terminate link.
+ */
+ if (sg_len - 1 == i || ((i+1) % max_len == 0))
+ next_lli_off = 0;
+ else {
+ if (next_lli_off == 0)
+ /* The first lli will be at next_lli_off */
+ next_lli_off = lcla_id * llis_per_log + 1;
+ else
+ next_lli_off++;
+ }
+
+ d40_log_fill_lli(&lli_sg[i],
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ next_lli_off,
+ lcsp13, data_width,
+ term_int && !next_lli_off,
+ true);
+ }
+ return total_size;
+}
+
+void d40_log_lli_write(struct d40_log_lli_full *lcpa,
+ struct d40_log_lli *lcla_src,
+ struct d40_log_lli *lcla_dst,
+ struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int llis_per_log)
+{
+ u32 slos = 0;
+ u32 dlos = 0;
+ int i;
+
+ lcpa->lcsp0 = lli_src->lcsp02;
+ lcpa->lcsp1 = lli_src->lcsp13;
+ lcpa->lcsp2 = lli_dst->lcsp02;
+ lcpa->lcsp3 = lli_dst->lcsp13;
+
+ slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
+ dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
+
+ for (i = 0; (i < llis_per_log) && slos && dlos; i++) {
+ writel(lli_src[i+1].lcsp02, &lcla_src[i].lcsp02);
+ writel(lli_src[i+1].lcsp13, &lcla_src[i].lcsp13);
+ writel(lli_dst[i+1].lcsp02, &lcla_dst[i].lcsp02);
+ writel(lli_dst[i+1].lcsp13, &lcla_dst[i].lcsp13);
+
+ slos = lli_src[i+1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
+ dlos = lli_dst[i+1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
+ }
+}
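d40_phy_lli_write() above repeats the same address arithmetic for every register: each physical channel owns a 32-byte block (D40_DREG_PCDELTA) of standard parameter registers starting at D40_DREG_PCBASE. A hedged sketch of that computation, with an invented helper name:

/*
 * Sketch only: the per-channel register address used throughout
 * d40_phy_lli_write().
 */
static inline void __iomem *d40_sketch_chan_reg(void __iomem *virtbase,
                                                u32 phy_chan_num, u32 reg)
{
        return virtbase + D40_DREG_PCBASE +
               phy_chan_num * D40_DREG_PCDELTA + reg;
}

With such a helper, the first store in d40_phy_lli_write() would read: writel(lli_src->reg_cfg, d40_sketch_chan_reg(virtbase, phy_chan_num, D40_CHAN_REG_SSCFG));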
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
new file mode 100644
index 000000000000..2029280cb332
--- /dev/null
+++ b/drivers/dma/ste_dma40_ll.h
@@ -0,0 +1,354 @@
+/*
+ * drivers/dma/ste_dma40_ll.h
+ *
+ * Copyright (C) ST-Ericsson 2007-2010
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Per Friden <per.friden@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+#ifndef STE_DMA40_LL_H
+#define STE_DMA40_LL_H
+
+#define D40_DREG_PCBASE 0x400
+#define D40_DREG_PCDELTA (8 * 4)
+#define D40_LLI_ALIGN 16 /* LLI alignment must be 16 bytes. */
+
+#define D40_TYPE_TO_GROUP(type) (type / 16)
+#define D40_TYPE_TO_EVENT(type) (type % 16)
+
+/* Most bits of the CFG register are the same in log as in phy mode */
+#define D40_SREG_CFG_MST_POS 15
+#define D40_SREG_CFG_TIM_POS 14
+#define D40_SREG_CFG_EIM_POS 13
+#define D40_SREG_CFG_LOG_INCR_POS 12
+#define D40_SREG_CFG_PHY_PEN_POS 12
+#define D40_SREG_CFG_PSIZE_POS 10
+#define D40_SREG_CFG_ESIZE_POS 8
+#define D40_SREG_CFG_PRI_POS 7
+#define D40_SREG_CFG_LBE_POS 6
+#define D40_SREG_CFG_LOG_GIM_POS 5
+#define D40_SREG_CFG_LOG_MFU_POS 4
+#define D40_SREG_CFG_PHY_TM_POS 4
+#define D40_SREG_CFG_PHY_EVTL_POS 0
+
+
+/* Standard channel parameters - basic mode (element register) */
+#define D40_SREG_ELEM_PHY_ECNT_POS 16
+#define D40_SREG_ELEM_PHY_EIDX_POS 0
+
+#define D40_SREG_ELEM_PHY_ECNT_MASK (0xFFFF << D40_SREG_ELEM_PHY_ECNT_POS)
+
+/* Standard channel parameters - basic mode (Link register) */
+#define D40_SREG_LNK_PHY_TCP_POS 0
+#define D40_SREG_LNK_PHY_LMP_POS 1
+#define D40_SREG_LNK_PHY_PRE_POS 2
+/*
+ * Source/destination link address. Contains the 29 most significant
+ * bits of the word-aligned address of the reload area.
+ */
+#define D40_SREG_LNK_PHYS_LNK_MASK 0xFFFFFFF8UL
+
+/* Standard basic channel logical mode */
+
+/* Element register */
+#define D40_SREG_ELEM_LOG_ECNT_POS 16
+#define D40_SREG_ELEM_LOG_LIDX_POS 8
+#define D40_SREG_ELEM_LOG_LOS_POS 1
+#define D40_SREG_ELEM_LOG_TCP_POS 0
+
+#define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS)
+
+/* Link register */
+#define D40_DEACTIVATE_EVENTLINE 0x0
+#define D40_ACTIVATE_EVENTLINE 0x1
+#define D40_EVENTLINE_POS(i) (2 * i)
+#define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i))
+
+/* Standard basic channel logical params in memory */
+
+/* LCSP0 */
+#define D40_MEM_LCSP0_ECNT_POS 16
+#define D40_MEM_LCSP0_SPTR_POS 0
+
+#define D40_MEM_LCSP0_ECNT_MASK (0xFFFF << D40_MEM_LCSP0_ECNT_POS)
+#define D40_MEM_LCSP0_SPTR_MASK (0xFFFF << D40_MEM_LCSP0_SPTR_POS)
+
+/* LCSP1 */
+#define D40_MEM_LCSP1_SPTR_POS 16
+#define D40_MEM_LCSP1_SCFG_MST_POS 15
+#define D40_MEM_LCSP1_SCFG_TIM_POS 14
+#define D40_MEM_LCSP1_SCFG_EIM_POS 13
+#define D40_MEM_LCSP1_SCFG_INCR_POS 12
+#define D40_MEM_LCSP1_SCFG_PSIZE_POS 10
+#define D40_MEM_LCSP1_SCFG_ESIZE_POS 8
+#define D40_MEM_LCSP1_SLOS_POS 1
+#define D40_MEM_LCSP1_STCP_POS 0
+
+#define D40_MEM_LCSP1_SPTR_MASK (0xFFFF << D40_MEM_LCSP1_SPTR_POS)
+#define D40_MEM_LCSP1_SCFG_TIM_MASK (0x1 << D40_MEM_LCSP1_SCFG_TIM_POS)
+#define D40_MEM_LCSP1_SCFG_INCR_MASK (0x1 << D40_MEM_LCSP1_SCFG_INCR_POS)
+#define D40_MEM_LCSP1_SCFG_PSIZE_MASK (0x3 << D40_MEM_LCSP1_SCFG_PSIZE_POS)
+#define D40_MEM_LCSP1_SLOS_MASK (0x7F << D40_MEM_LCSP1_SLOS_POS)
+#define D40_MEM_LCSP1_STCP_MASK (0x1 << D40_MEM_LCSP1_STCP_POS)
+
+/* LCSP2 */
+#define D40_MEM_LCSP2_ECNT_POS 16
+
+#define D40_MEM_LCSP2_ECNT_MASK (0xFFFF << D40_MEM_LCSP2_ECNT_POS)
+
+/* LCSP3 */
+#define D40_MEM_LCSP3_DCFG_MST_POS 15
+#define D40_MEM_LCSP3_DCFG_TIM_POS 14
+#define D40_MEM_LCSP3_DCFG_EIM_POS 13
+#define D40_MEM_LCSP3_DCFG_INCR_POS 12
+#define D40_MEM_LCSP3_DCFG_PSIZE_POS 10
+#define D40_MEM_LCSP3_DCFG_ESIZE_POS 8
+#define D40_MEM_LCSP3_DLOS_POS 1
+#define D40_MEM_LCSP3_DTCP_POS 0
+
+#define D40_MEM_LCSP3_DLOS_MASK (0x7F << D40_MEM_LCSP3_DLOS_POS)
+#define D40_MEM_LCSP3_DTCP_MASK (0x1 << D40_MEM_LCSP3_DTCP_POS)
+
+
+/* Standard channel parameter register offsets */
+#define D40_CHAN_REG_SSCFG 0x00
+#define D40_CHAN_REG_SSELT 0x04
+#define D40_CHAN_REG_SSPTR 0x08
+#define D40_CHAN_REG_SSLNK 0x0C
+#define D40_CHAN_REG_SDCFG 0x10
+#define D40_CHAN_REG_SDELT 0x14
+#define D40_CHAN_REG_SDPTR 0x18
+#define D40_CHAN_REG_SDLNK 0x1C
+
+/* DMA Register Offsets */
+#define D40_DREG_GCC 0x000
+#define D40_DREG_PRTYP 0x004
+#define D40_DREG_PRSME 0x008
+#define D40_DREG_PRSMO 0x00C
+#define D40_DREG_PRMSE 0x010
+#define D40_DREG_PRMSO 0x014
+#define D40_DREG_PRMOE 0x018
+#define D40_DREG_PRMOO 0x01C
+#define D40_DREG_LCPA 0x020
+#define D40_DREG_LCLA 0x024
+#define D40_DREG_ACTIVE 0x050
+#define D40_DREG_ACTIVO 0x054
+#define D40_DREG_FSEB1 0x058
+#define D40_DREG_FSEB2 0x05C
+#define D40_DREG_PCMIS 0x060
+#define D40_DREG_PCICR 0x064
+#define D40_DREG_PCTIS 0x068
+#define D40_DREG_PCEIS 0x06C
+#define D40_DREG_LCMIS0 0x080
+#define D40_DREG_LCMIS1 0x084
+#define D40_DREG_LCMIS2 0x088
+#define D40_DREG_LCMIS3 0x08C
+#define D40_DREG_LCICR0 0x090
+#define D40_DREG_LCICR1 0x094
+#define D40_DREG_LCICR2 0x098
+#define D40_DREG_LCICR3 0x09C
+#define D40_DREG_LCTIS0 0x0A0
+#define D40_DREG_LCTIS1 0x0A4
+#define D40_DREG_LCTIS2 0x0A8
+#define D40_DREG_LCTIS3 0x0AC
+#define D40_DREG_LCEIS0 0x0B0
+#define D40_DREG_LCEIS1 0x0B4
+#define D40_DREG_LCEIS2 0x0B8
+#define D40_DREG_LCEIS3 0x0BC
+#define D40_DREG_STFU 0xFC8
+#define D40_DREG_ICFG 0xFCC
+#define D40_DREG_PERIPHID0 0xFE0
+#define D40_DREG_PERIPHID1 0xFE4
+#define D40_DREG_PERIPHID2 0xFE8
+#define D40_DREG_PERIPHID3 0xFEC
+#define D40_DREG_CELLID0 0xFF0
+#define D40_DREG_CELLID1 0xFF4
+#define D40_DREG_CELLID2 0xFF8
+#define D40_DREG_CELLID3 0xFFC
+
+/* LLI related structures */
+
+/**
+ * struct d40_phy_lli - The basic configuration registers for each physical
+ * channel.
+ *
+ * @reg_cfg: The configuration register.
+ * @reg_elt: The element register.
+ * @reg_ptr: The pointer register.
+ * @reg_lnk: The link register.
+ *
+ * These registers are set up for both physical and logical transfers.
+ * Note that the bits in each register have different meanings in logical
+ * and physical (standard) mode.
+ *
+ * This struct must be 16-byte aligned and must only contain physical registers
+ * since it will be directly accessed by the DMA.
+ */
+struct d40_phy_lli {
+ u32 reg_cfg;
+ u32 reg_elt;
+ u32 reg_ptr;
+ u32 reg_lnk;
+};
+
+/**
+ * struct d40_phy_lli_bidir - struct for a transfer.
+ *
+ * @src: Register settings for src channel.
+ * @dst: Register settings for dst channel.
+ * @dst_addr: Physical destination address.
+ * @src_addr: Physical source address.
+ *
+ * All DMA transfers have a source and a destination.
+ */
+
+struct d40_phy_lli_bidir {
+ struct d40_phy_lli *src;
+ struct d40_phy_lli *dst;
+ dma_addr_t dst_addr;
+ dma_addr_t src_addr;
+};
+
+
+/**
+ * struct d40_log_lli - logical lli configuration
+ *
+ * @lcsp02: Either maps to register lcsp0 if src or lcsp2 if dst.
+ * @lcsp13: Either maps to register lcsp1 if src or lcsp3 if dst.
+ *
+ * This struct must be 8-byte aligned since it will be accessed directly by
+ * the DMA. Never add any non-hw-mapped registers to this struct.
+ */
+
+struct d40_log_lli {
+ u32 lcsp02;
+ u32 lcsp13;
+};
+
+/**
+ * struct d40_log_lli_bidir - For both src and dst
+ *
+ * @src: pointer to src lli configuration.
+ * @dst: pointer to dst lli configuration.
+ *
+ * You always have a src and a dst when doing DMA transfers.
+ */
+
+struct d40_log_lli_bidir {
+ struct d40_log_lli *src;
+ struct d40_log_lli *dst;
+};
+
+/**
+ * struct d40_log_lli_full - LCPA layout
+ *
+ * @lcsp0: Logical Channel Standard Param 0 - Src.
+ * @lcsp1: Logical Channel Standard Param 1 - Src.
+ * @lcsp2: Logical Channel Standard Param 2 - Dst.
+ * @lcsp3: Logical Channel Standard Param 3 - Dst.
+ *
+ * This struct maps to LCPA physical memory layout. Must map to
+ * the hw.
+ */
+struct d40_log_lli_full {
+ u32 lcsp0;
+ u32 lcsp1;
+ u32 lcsp2;
+ u32 lcsp3;
+};
+
+/**
+ * struct d40_def_lcsp - Default LCSP1 and LCSP3 settings
+ *
+ * @lcsp3: The default configuration for dst.
+ * @lcsp1: The default configuration for src.
+ */
+struct d40_def_lcsp {
+ u32 lcsp3;
+ u32 lcsp1;
+};
+
+/**
+ * struct d40_lcla_elem - Info for one LCLA element.
+ *
+ * @src_id: logical channel src id
+ * @dst_id: logical channel dst id
+ * @src: LCPA formatted src parameters
+ * @dst: LCPA formatted dst parameters
+ *
+ */
+struct d40_lcla_elem {
+ int src_id;
+ int dst_id;
+ struct d40_log_lli *src;
+ struct d40_log_lli *dst;
+};
+
+/* Physical channels */
+
+void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
+ u32 *src_cfg, u32 *dst_cfg, bool is_log);
+
+void d40_log_cfg(struct stedma40_chan_cfg *cfg,
+ u32 *lcsp1, u32 *lcsp3);
+
+int d40_phy_sg_to_lli(struct scatterlist *sg,
+ int sg_len,
+ dma_addr_t target,
+ struct d40_phy_lli *lli,
+ dma_addr_t lli_phys,
+ u32 reg_cfg,
+ u32 data_width,
+ int psize,
+ bool term_int);
+
+int d40_phy_fill_lli(struct d40_phy_lli *lli,
+ dma_addr_t data,
+ u32 data_size,
+ int psize,
+ dma_addr_t next_lli,
+ u32 reg_cfg,
+ bool term_int,
+ u32 data_width,
+ bool is_device);
+
+void d40_phy_lli_write(void __iomem *virtbase,
+ u32 phy_chan_num,
+ struct d40_phy_lli *lli_dst,
+ struct d40_phy_lli *lli_src);
+
+/* Logical channels */
+
+void d40_log_fill_lli(struct d40_log_lli *lli,
+ dma_addr_t data, u32 data_size,
+ u32 lli_next_off, u32 reg_cfg,
+ u32 data_width,
+ bool term_int, bool addr_inc);
+
+int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
+ struct scatterlist *sg,
+ int sg_len,
+ struct d40_log_lli_bidir *lli,
+ struct d40_def_lcsp *lcsp,
+ u32 src_data_width,
+ u32 dst_data_width,
+ enum dma_data_direction direction,
+ bool term_int, dma_addr_t dev_addr, int max_len,
+ int llis_per_log);
+
+void d40_log_lli_write(struct d40_log_lli_full *lcpa,
+ struct d40_log_lli *lcla_src,
+ struct d40_log_lli *lcla_dst,
+ struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int llis_per_log);
+
+int d40_log_sg_to_lli(int lcla_id,
+ struct scatterlist *sg,
+ int sg_len,
+ struct d40_log_lli *lli_sg,
+ u32 lcsp13, /* src or dst*/
+ u32 data_width,
+ bool term_int, int max_len, int llis_per_log);
+
+#endif /* STE_DMA40_LL_H */
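The LCSP masks above describe how a logical transfer descriptor is split across the two 32-bit words of struct d40_log_lli. As a sketch of that packing (the inverse of what d40_log_fill_lli() does; the helper names are invented):

/* Illustration only: decode the fields packed by d40_log_fill_lli(). */
static inline u32 d40_sketch_log_ecnt(const struct d40_log_lli *lli)
{
        /* the element count lives in lcsp0/lcsp2 bits [31:16] */
        return (lli->lcsp02 & D40_MEM_LCSP0_ECNT_MASK) >> D40_MEM_LCSP0_ECNT_POS;
}

static inline u32 d40_sketch_log_addr(const struct d40_log_lli *lli)
{
        /* 16 LSBs of the address in lcsp02, 16 MSBs in lcsp13 */
        return (lli->lcsp13 & D40_MEM_LCSP1_SPTR_MASK) |
               (lli->lcsp02 & D40_MEM_LCSP0_SPTR_MASK);
}

Note that only the low 32 bits of an address are representable this way, which matches what the hardware registers can hold.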
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
new file mode 100644
index 000000000000..a1bf77c1993f
--- /dev/null
+++ b/drivers/dma/timb_dma.c
@@ -0,0 +1,860 @@
+/*
+ * timb_dma.c timberdale FPGA DMA driver
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA DMA engine
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/timb_dma.h>
+
+#define DRIVER_NAME "timb-dma"
+
+/* Global DMA registers */
+#define TIMBDMA_ACR 0x34
+#define TIMBDMA_32BIT_ADDR 0x01
+
+#define TIMBDMA_ISR 0x080000
+#define TIMBDMA_IPR 0x080004
+#define TIMBDMA_IER 0x080008
+
+/* Channel specific registers */
+/* RX instances base addresses are 0x00, 0x40, 0x80 ...
+ * TX instances base addresses are 0x18, 0x58, 0x98 ...
+ */
+#define TIMBDMA_INSTANCE_OFFSET 0x40
+#define TIMBDMA_INSTANCE_TX_OFFSET 0x18
+
+/* RX registers, relative the instance base */
+#define TIMBDMA_OFFS_RX_DHAR 0x00
+#define TIMBDMA_OFFS_RX_DLAR 0x04
+#define TIMBDMA_OFFS_RX_LR 0x0C
+#define TIMBDMA_OFFS_RX_BLR 0x10
+#define TIMBDMA_OFFS_RX_ER 0x14
+#define TIMBDMA_RX_EN 0x01
+/* Bytes per row, video-specific register
+ * which is placed after the TX registers...
+ */
+#define TIMBDMA_OFFS_RX_BPRR 0x30
+
+/* TX registers, relative the instance base */
+#define TIMBDMA_OFFS_TX_DHAR 0x00
+#define TIMBDMA_OFFS_TX_DLAR 0x04
+#define TIMBDMA_OFFS_TX_BLR 0x0C
+#define TIMBDMA_OFFS_TX_LR 0x14
+
+
+#define TIMB_DMA_DESC_SIZE 8
+
+struct timb_dma_desc {
+ struct list_head desc_node;
+ struct dma_async_tx_descriptor txd;
+ u8 *desc_list;
+ unsigned int desc_list_len;
+ bool interrupt;
+};
+
+struct timb_dma_chan {
+ struct dma_chan chan;
+ void __iomem *membase;
+ spinlock_t lock; /* Used to protect data structures,
+ especially the lists and descriptors,
+ from races between the tasklet and calls
+ from above */
+ dma_cookie_t last_completed_cookie;
+ bool ongoing;
+ struct list_head active_list;
+ struct list_head queue;
+ struct list_head free_list;
+ unsigned int bytes_per_line;
+ enum dma_data_direction direction;
+ unsigned int descs; /* Descriptors to allocate */
+ unsigned int desc_elems; /* number of elems per descriptor */
+};
+
+struct timb_dma {
+ struct dma_device dma;
+ void __iomem *membase;
+ struct tasklet_struct tasklet;
+ struct timb_dma_chan channels[0];
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+static struct device *chan2dmadev(struct dma_chan *chan)
+{
+ return chan2dev(chan)->parent->parent;
+}
+
+static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
+{
+ int id = td_chan->chan.chan_id;
+ return (struct timb_dma *)((u8 *)td_chan -
+ id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
+}
+
+/* Must be called with the spinlock held */
+static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
+{
+ int id = td_chan->chan.chan_id;
+ struct timb_dma *td = tdchantotd(td_chan);
+ u32 ier;
+
+ /* enable interrupt for this channel */
+ ier = ioread32(td->membase + TIMBDMA_IER);
+ ier |= 1 << id;
+ dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
+ ier);
+ iowrite32(ier, td->membase + TIMBDMA_IER);
+}
+
+/* Should be called with the spinlock held */
+static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
+{
+ int id = td_chan->chan.chan_id;
+ struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan -
+ id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
+ u32 isr;
+ bool done = false;
+
+ dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);
+
+ isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
+ if (isr) {
+ iowrite32(isr, td->membase + TIMBDMA_ISR);
+ done = true;
+ }
+
+ return done;
+}
+
+static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
+ bool single)
+{
+ dma_addr_t addr;
+ int len;
+
+ addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
+ dma_desc[4];
+
+ len = (dma_desc[3] << 8) | dma_desc[2];
+
+ if (single)
+ dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
+ td_chan->direction);
+ else
+ dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
+ td_chan->direction);
+}
+
+static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
+{
+ struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
+ struct timb_dma_chan, chan);
+ u8 *descs;
+
+ for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
+ __td_unmap_desc(td_chan, descs, single);
+ if (descs[0] & 0x02)
+ break;
+ }
+}
+
+static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
+ struct scatterlist *sg, bool last)
+{
+ if (sg_dma_len(sg) > USHRT_MAX) {
+ dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
+ return -EINVAL;
+ }
+
+ /* length must be word aligned */
+ if (sg_dma_len(sg) % sizeof(u32)) {
+ dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
+ sg_dma_len(sg));
+ return -EINVAL;
+ }
+
+ dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n",
+ dma_desc, (void *)sg_dma_address(sg));
+
+ dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
+ dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
+ dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
+ dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;
+
+ dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
+ dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;
+
+ dma_desc[1] = 0x00;
+ dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */
+
+ return 0;
+}
+
+/* Must be called with the spinlock held */
+static void __td_start_dma(struct timb_dma_chan *td_chan)
+{
+ struct timb_dma_desc *td_desc;
+
+ if (td_chan->ongoing) {
+ dev_err(chan2dev(&td_chan->chan),
+ "Transfer already ongoing\n");
+ return;
+ }
+
+ td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
+ desc_node);
+
+ dev_dbg(chan2dev(&td_chan->chan),
+ "td_chan: %p, chan: %d, membase: %p\n",
+ td_chan, td_chan->chan.chan_id, td_chan->membase);
+
+ if (td_chan->direction == DMA_FROM_DEVICE) {
+
+ /* descriptor address */
+ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
+ iowrite32(td_desc->txd.phys, td_chan->membase +
+ TIMBDMA_OFFS_RX_DLAR);
+ /* Bytes per line */
+ iowrite32(td_chan->bytes_per_line, td_chan->membase +
+ TIMBDMA_OFFS_RX_BPRR);
+ /* enable RX */
+ iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
+ } else {
+ /* address high */
+ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
+ iowrite32(td_desc->txd.phys, td_chan->membase +
+ TIMBDMA_OFFS_TX_DLAR);
+ }
+
+ td_chan->ongoing = true;
+
+ if (td_desc->interrupt)
+ __td_enable_chan_irq(td_chan);
+}
+
+static void __td_finish(struct timb_dma_chan *td_chan)
+{
+ dma_async_tx_callback callback;
+ void *param;
+ struct dma_async_tx_descriptor *txd;
+ struct timb_dma_desc *td_desc;
+
+ /* can happen if the descriptor is canceled */
+ if (list_empty(&td_chan->active_list))
+ return;
+
+ td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
+ desc_node);
+ txd = &td_desc->txd;
+
+ dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
+ txd->cookie);
+
+ /* make sure to stop the transfer */
+ if (td_chan->direction == DMA_FROM_DEVICE)
+ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
+/* Currently no support for stopping DMA transfers
+ else
+ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
+*/
+ td_chan->last_completed_cookie = txd->cookie;
+ td_chan->ongoing = false;
+
+ callback = txd->callback;
+ param = txd->callback_param;
+
+ list_move(&td_desc->desc_node, &td_chan->free_list);
+
+ if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
+ __td_unmap_descs(td_desc,
+ txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);
+
+ /*
+ * The API requires that no submissions are done from a
+ * callback, so we don't need to drop the lock here
+ */
+ if (callback)
+ callback(param);
+}
+
+static u32 __td_ier_mask(struct timb_dma *td)
+{
+ int i;
+ u32 ret = 0;
+
+ for (i = 0; i < td->dma.chancnt; i++) {
+ struct timb_dma_chan *td_chan = td->channels + i;
+ if (td_chan->ongoing) {
+ struct timb_dma_desc *td_desc =
+ list_entry(td_chan->active_list.next,
+ struct timb_dma_desc, desc_node);
+ if (td_desc->interrupt)
+ ret |= 1 << i;
+ }
+ }
+
+ return ret;
+}
+
+static void __td_start_next(struct timb_dma_chan *td_chan)
+{
+ struct timb_dma_desc *td_desc;
+
+ BUG_ON(list_empty(&td_chan->queue));
+ BUG_ON(td_chan->ongoing);
+
+ td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
+ desc_node);
+
+ dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
+ __func__, td_desc->txd.cookie);
+
+ list_move(&td_desc->desc_node, &td_chan->active_list);
+ __td_start_dma(td_chan);
+}
+
+static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
+ txd);
+ struct timb_dma_chan *td_chan = container_of(txd->chan,
+ struct timb_dma_chan, chan);
+ dma_cookie_t cookie;
+
+ spin_lock_bh(&td_chan->lock);
+
+ cookie = txd->chan->cookie;
+ if (++cookie < 0)
+ cookie = 1;
+ txd->chan->cookie = cookie;
+ txd->cookie = cookie;
+
+ if (list_empty(&td_chan->active_list)) {
+ dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
+ txd->cookie);
+ list_add_tail(&td_desc->desc_node, &td_chan->active_list);
+ __td_start_dma(td_chan);
+ } else {
+ dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
+ txd->cookie);
+
+ list_add_tail(&td_desc->desc_node, &td_chan->queue);
+ }
+
+ spin_unlock_bh(&td_chan->lock);
+
+ return cookie;
+}
+
+static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
+{
+ struct dma_chan *chan = &td_chan->chan;
+ struct timb_dma_desc *td_desc;
+ int err;
+
+ td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
+ if (!td_desc) {
+ dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
+ goto err;
+ }
+
+ td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
+
+ td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
+ if (!td_desc->desc_list) {
+ dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
+ goto err;
+ }
+
+ dma_async_tx_descriptor_init(&td_desc->txd, chan);
+ td_desc->txd.tx_submit = td_tx_submit;
+ td_desc->txd.flags = DMA_CTRL_ACK;
+
+ td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
+ td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);
+
+ err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
+ if (err) {
+ dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
+ goto err;
+ }
+
+ return td_desc;
+err:
+ kfree(td_desc->desc_list);
+ kfree(td_desc);
+
+ return NULL;
+
+}
+
+static void td_free_desc(struct timb_dma_desc *td_desc)
+{
+ dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
+ dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
+ td_desc->desc_list_len, DMA_TO_DEVICE);
+
+ kfree(td_desc->desc_list);
+ kfree(td_desc);
+}
+
+static void td_desc_put(struct timb_dma_chan *td_chan,
+ struct timb_dma_desc *td_desc)
+{
+ dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);
+
+ spin_lock_bh(&td_chan->lock);
+ list_add(&td_desc->desc_node, &td_chan->free_list);
+ spin_unlock_bh(&td_chan->lock);
+}
+
+static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
+{
+ struct timb_dma_desc *td_desc, *_td_desc;
+ struct timb_dma_desc *ret = NULL;
+
+ spin_lock_bh(&td_chan->lock);
+ list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
+ desc_node) {
+ if (async_tx_test_ack(&td_desc->txd)) {
+ list_del(&td_desc->desc_node);
+ ret = td_desc;
+ break;
+ }
+ dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
+ td_desc);
+ }
+ spin_unlock_bh(&td_chan->lock);
+
+ return ret;
+}
+
+static int td_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ int i;
+
+ dev_dbg(chan2dev(chan), "%s: entry\n", __func__);
+
+ BUG_ON(!list_empty(&td_chan->free_list));
+ for (i = 0; i < td_chan->descs; i++) {
+ struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
+ if (!td_desc) {
+ if (i)
+ break;
+ else {
+ dev_err(chan2dev(chan),
+ "Couldnt allocate any descriptors\n");
+ return -ENOMEM;
+ }
+ }
+
+ td_desc_put(td_chan, td_desc);
+ }
+
+ spin_lock_bh(&td_chan->lock);
+ td_chan->last_completed_cookie = 1;
+ chan->cookie = 1;
+ spin_unlock_bh(&td_chan->lock);
+
+ return 0;
+}
+
+static void td_free_chan_resources(struct dma_chan *chan)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ struct timb_dma_desc *td_desc, *_td_desc;
+ LIST_HEAD(list);
+
+ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+
+ /* check that all descriptors are free */
+ BUG_ON(!list_empty(&td_chan->active_list));
+ BUG_ON(!list_empty(&td_chan->queue));
+
+ spin_lock_bh(&td_chan->lock);
+ list_splice_init(&td_chan->free_list, &list);
+ spin_unlock_bh(&td_chan->lock);
+
+ list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
+ dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
+ td_desc);
+ td_free_desc(td_desc);
+ }
+}
+
+static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ int ret;
+
+ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+
+ last_complete = td_chan->last_completed_cookie;
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+ dev_dbg(chan2dev(chan),
+ "%s: exit, ret: %d, last_complete: %d, last_used: %d\n",
+ __func__, ret, last_complete, last_used);
+
+ return ret;
+}
+
+static void td_issue_pending(struct dma_chan *chan)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+
+ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+ spin_lock_bh(&td_chan->lock);
+
+ if (!list_empty(&td_chan->active_list))
+ /* transfer ongoing */
+ if (__td_dma_done_ack(td_chan))
+ __td_finish(td_chan);
+
+ if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
+ __td_start_next(td_chan);
+
+ spin_unlock_bh(&td_chan->lock);
+}
+
+static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_data_direction direction, unsigned long flags)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ struct timb_dma_desc *td_desc;
+ struct scatterlist *sg;
+ unsigned int i;
+ unsigned int desc_usage = 0;
+
+ if (!sgl || !sg_len) {
+ dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
+ return NULL;
+ }
+
+ /* even channels are for RX, odd for TX */
+ if (td_chan->direction != direction) {
+ dev_err(chan2dev(chan),
+ "Requesting channel in wrong direction\n");
+ return NULL;
+ }
+
+ td_desc = td_desc_get(td_chan);
+ if (!td_desc) {
+ dev_err(chan2dev(chan), "Not enough descriptors available\n");
+ return NULL;
+ }
+
+ td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ int err;
+ if (desc_usage > td_desc->desc_list_len) {
+ dev_err(chan2dev(chan), "No descriptor space\n");
+ return NULL;
+ }
+
+ err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
+ i == (sg_len - 1));
+ if (err) {
+ dev_err(chan2dev(chan), "Failed to update desc: %d\n",
+ err);
+ td_desc_put(td_chan, td_desc);
+ return NULL;
+ }
+ desc_usage += TIMB_DMA_DESC_SIZE;
+ }
+
+ dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
+ td_desc->desc_list_len, DMA_TO_DEVICE);
+
+ return &td_desc->txd;
+}
+
+static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ struct timb_dma_desc *td_desc, *_td_desc;
+
+ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ /* first the easy part, put the queue into the free list */
+ spin_lock_bh(&td_chan->lock);
+ list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
+ desc_node)
+ list_move(&td_desc->desc_node, &td_chan->free_list);
+
+ /* now tear down the running transfer */
+ __td_finish(td_chan);
+ spin_unlock_bh(&td_chan->lock);
+
+ return 0;
+}
+
+static void td_tasklet(unsigned long data)
+{
+ struct timb_dma *td = (struct timb_dma *)data;
+ u32 isr;
+ u32 ipr;
+ u32 ier;
+ int i;
+
+ isr = ioread32(td->membase + TIMBDMA_ISR);
+ ipr = isr & __td_ier_mask(td);
+
+ /* ack the interrupts */
+ iowrite32(ipr, td->membase + TIMBDMA_ISR);
+
+ for (i = 0; i < td->dma.chancnt; i++)
+ if (ipr & (1 << i)) {
+ struct timb_dma_chan *td_chan = td->channels + i;
+ spin_lock(&td_chan->lock);
+ __td_finish(td_chan);
+ if (!list_empty(&td_chan->queue))
+ __td_start_next(td_chan);
+ spin_unlock(&td_chan->lock);
+ }
+
+ ier = __td_ier_mask(td);
+ iowrite32(ier, td->membase + TIMBDMA_IER);
+}
+
+
+static irqreturn_t td_irq(int irq, void *devid)
+{
+ struct timb_dma *td = devid;
+ u32 ipr = ioread32(td->membase + TIMBDMA_IPR);
+
+ if (ipr) {
+ /* disable interrupts, will be re-enabled in tasklet */
+ iowrite32(0, td->membase + TIMBDMA_IER);
+
+ tasklet_schedule(&td->tasklet);
+
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+
+
+static int __devinit td_probe(struct platform_device *pdev)
+{
+ struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
+ struct timb_dma *td;
+ struct resource *iomem;
+ int irq;
+ int err;
+ int i;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ if (!request_mem_region(iomem->start, resource_size(iomem),
+ DRIVER_NAME))
+ return -EBUSY;
+
+ td = kzalloc(sizeof(struct timb_dma) +
+ sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
+ if (!td) {
+ err = -ENOMEM;
+ goto err_release_region;
+ }
+
+ dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);
+
+ td->membase = ioremap(iomem->start, resource_size(iomem));
+ if (!td->membase) {
+ dev_err(&pdev->dev, "Failed to remap I/O memory\n");
+ err = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ /* 32bit addressing */
+ iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);
+
+ /* disable and clear any interrupts */
+ iowrite32(0x0, td->membase + TIMBDMA_IER);
+ iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);
+
+ tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);
+
+ err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request IRQ\n");
+ goto err_tasklet_kill;
+ }
+
+ td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
+ td->dma.device_free_chan_resources = td_free_chan_resources;
+ td->dma.device_tx_status = td_tx_status;
+ td->dma.device_issue_pending = td_issue_pending;
+
+ dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
+ dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
+ td->dma.device_prep_slave_sg = td_prep_slave_sg;
+ td->dma.device_control = td_control;
+
+ td->dma.dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&td->dma.channels);
+
+ for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) {
+ struct timb_dma_chan *td_chan = &td->channels[i];
+ struct timb_dma_platform_data_channel *pchan =
+ pdata->channels + i;
+
+ /* even channels are RX, odd are TX */
+ if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) {
+ dev_err(&pdev->dev, "Wrong channel configuration\n");
+ err = -EINVAL;
+ goto err_tasklet_kill;
+ }
+
+ td_chan->chan.device = &td->dma;
+ td_chan->chan.cookie = 1;
+ td_chan->chan.chan_id = i;
+ spin_lock_init(&td_chan->lock);
+ INIT_LIST_HEAD(&td_chan->active_list);
+ INIT_LIST_HEAD(&td_chan->queue);
+ INIT_LIST_HEAD(&td_chan->free_list);
+
+ td_chan->descs = pchan->descriptors;
+ td_chan->desc_elems = pchan->descriptor_elements;
+ td_chan->bytes_per_line = pchan->bytes_per_line;
+ td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+
+ td_chan->membase = td->membase +
+ (i / 2) * TIMBDMA_INSTANCE_OFFSET +
+ (pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);
+
+ dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
+ i, td_chan->membase);
+
+ list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
+ }
+
+ err = dma_async_device_register(&td->dma);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register async device\n");
+ goto err_free_irq;
+ }
+
+ platform_set_drvdata(pdev, td);
+
+ dev_dbg(&pdev->dev, "Probe result: %d\n", err);
+ return err;
+
+err_free_irq:
+ free_irq(irq, td);
+err_tasklet_kill:
+ tasklet_kill(&td->tasklet);
+ iounmap(td->membase);
+err_free_mem:
+ kfree(td);
+err_release_region:
+ release_mem_region(iomem->start, resource_size(iomem));
+
+ return err;
+
+}
+
+static int __devexit td_remove(struct platform_device *pdev)
+{
+ struct timb_dma *td = platform_get_drvdata(pdev);
+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int irq = platform_get_irq(pdev, 0);
+
+ dma_async_device_unregister(&td->dma);
+ free_irq(irq, td);
+ tasklet_kill(&td->tasklet);
+ iounmap(td->membase);
+ kfree(td);
+ release_mem_region(iomem->start, resource_size(iomem));
+
+ platform_set_drvdata(pdev, NULL);
+
+ dev_dbg(&pdev->dev, "Removed...\n");
+ return 0;
+}
+
+static struct platform_driver td_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = td_probe,
+ .remove = __exit_p(td_remove),
+};
+
+static int __init td_init(void)
+{
+ return platform_driver_register(&td_driver);
+}
+module_init(td_init);
+
+static void __exit td_exit(void)
+{
+ platform_driver_unregister(&td_driver);
+}
+module_exit(td_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Timberdale DMA controller driver");
+MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
+MODULE_ALIAS("platform:"DRIVER_NAME);
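td_fill_desc() above serialises each scatterlist entry into an 8-byte little-endian descriptor that the FPGA walks in memory. The following sketch of the layout, and of decoding one entry back, mirrors __td_unmap_desc(); the struct and helper are illustrative and not part of the driver.

/*
 * timb-dma descriptor layout per td_fill_desc():
 *   byte 0    - flags: 0x21 ("tran, valid"), 0x02 set on the last entry
 *   byte 1    - reserved (0)
 *   bytes 2-3 - length, little endian
 *   bytes 4-7 - bus address, little endian
 */
struct timb_sketch_desc {
        dma_addr_t addr;
        int len;
        bool last;
};

static void timb_sketch_decode(const u8 *d, struct timb_sketch_desc *out)
{
        out->addr = (d[7] << 24) | (d[6] << 16) | (d[5] << 8) | d[4];
        out->len = (d[3] << 8) | d[2];
        out->last = (d[0] & 0x02) != 0;
}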
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 75fcf1ac8bb7..cbd83e362b5e 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -938,12 +938,17 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return &first->txd;
}
-static void txx9dmac_terminate_all(struct dma_chan *chan)
+static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
struct txx9dmac_desc *desc, *_desc;
LIST_HEAD(list);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -EINVAL;
+
dev_vdbg(chan2dev(chan), "terminate_all\n");
spin_lock_bh(&dc->lock);
@@ -958,12 +963,13 @@ static void txx9dmac_terminate_all(struct dma_chan *chan)
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
txx9dmac_descriptor_complete(dc, desc);
+
+ return 0;
}
static enum dma_status
-txx9dmac_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
dma_cookie_t last_used;
@@ -985,10 +991,7 @@ txx9dmac_is_tx_complete(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return ret;
}
@@ -1153,8 +1156,8 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
dc->dma.dev = &pdev->dev;
dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
- dc->dma.device_terminate_all = txx9dmac_terminate_all;
- dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete;
+ dc->dma.device_control = txx9dmac_control;
+ dc->dma.device_tx_status = txx9dmac_tx_status;
dc->dma.device_issue_pending = txx9dmac_issue_pending;
if (pdata && pdata->memcpy_chan == ch) {
dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
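The txx9dmac hunks replace the driver-specific terminate_all and is_tx_complete hooks with the consolidated device_control and device_tx_status callbacks used elsewhere in this series. A hedged sketch of how the new control entry point is reached from a client; the wrapper name is invented, the dispatch follows the assignments above.

/* Sketch only: terminating a channel now goes through device_control. */
static int sketch_terminate(struct dma_chan *chan)
{
        /* for this driver this lands in txx9dmac_control(DMA_TERMINATE_ALL) */
        return chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
}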
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 55c9c59b3f71..aedef7941b22 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -69,6 +69,9 @@ config EDAC_MM_EDAC
occurred so that a particular failing memory module can be
replaced. If unsure, select 'Y'.
+config EDAC_MCE
+ bool
+
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h"
depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI && EDAC_DECODE_MCE
@@ -166,6 +169,16 @@ config EDAC_I5400
Support for error detection and correction the Intel
i5400 MCH chipset (Seaburg).
+config EDAC_I7CORE
+ tristate "Intel i7 Core (Nehalem) processors"
+ depends on EDAC_MM_EDAC && PCI && X86
+ select EDAC_MCE
+ help
+ Support for error detection and correction on the Intel
+ i7 Core (Nehalem) Integrated Memory Controller that exists on
+ newer processors like i7 Core, i7 Core Extreme, Xeon 35xx
+ and Xeon 55xx processors.
+
config EDAC_I82860
tristate "Intel 82860"
depends on EDAC_MM_EDAC && PCI && X86_32
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index bc5dc232a0fb..ca6b1bb24ccc 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -8,6 +8,7 @@
obj-$(CONFIG_EDAC) := edac_stub.o
obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o
+obj-$(CONFIG_EDAC_MCE) += edac_mce.o
edac_core-objs := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o
edac_core-objs += edac_module.o edac_device_sysfs.o
@@ -23,6 +24,7 @@ obj-$(CONFIG_EDAC_CPC925) += cpc925_edac.o
obj-$(CONFIG_EDAC_I5000) += i5000_edac.o
obj-$(CONFIG_EDAC_I5100) += i5100_edac.o
obj-$(CONFIG_EDAC_I5400) += i5400_edac.o
+obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o
obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index f2330f81cb5e..cace0a7b707a 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -294,7 +294,7 @@ static int __devinit amd76x_init_one(struct pci_dev *pdev,
{
debugf0("%s()\n", __func__);
- /* don't need to call pci_device_enable() */
+ /* don't need to call pci_enable_device() */
return amd76x_probe1(pdev, ent->driver_data);
}
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 001b2e797fb3..efca9343d26a 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -341,12 +341,30 @@ struct csrow_info {
struct channel_info *channels;
};
+struct mcidev_sysfs_group {
+ const char *name; /* group name */
+ struct mcidev_sysfs_attribute *mcidev_attr; /* group attributes */
+};
+
+struct mcidev_sysfs_group_kobj {
+ struct list_head list; /* list for all instances within a mc */
+
+ struct kobject kobj; /* kobj for the group */
+
+ struct mcidev_sysfs_group *grp; /* group description table */
+ struct mem_ctl_info *mci; /* the parent */
+};
+
/* mcidev_sysfs_attribute structure
* used for driver sysfs attributes and in mem_ctl_info
* sysfs top level entries
*/
struct mcidev_sysfs_attribute {
- struct attribute attr;
+ /* It should use either attr or grp */
+ struct attribute attr;
+ struct mcidev_sysfs_group *grp; /* Points to a group of attributes */
+
+ /* Ops for show/store values at the attribute - not used on group */
ssize_t (*show)(struct mem_ctl_info *,char *);
ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
};
@@ -424,6 +442,9 @@ struct mem_ctl_info {
/* edac sysfs device control */
struct kobject edac_mci_kobj;
+ /* list for all grp instances within a mc */
+ struct list_head grp_kobj_list;
+
/* Additional top controller level attributes, but specified
* by the low level driver.
*
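The new .grp field is meant to be filled by the low level driver: a group entry in the attribute array leaves attr.name NULL and points .grp at a mcidev_sysfs_group, whose own attribute table then shows up as files inside a sub-directory of the mci kobject. A minimal sketch of the intended driver-side usage follows (the names my_foo_show, my_grp_attrs, my_group and my_mci_attrs are hypothetical, for illustration only; the i7core driver later in this series does the same thing with its inject_addrmatch group):

#include "edac_core.h"

/* hypothetical show handler */
static ssize_t my_foo_show(struct mem_ctl_info *mci, char *data)
{
	return sprintf(data, "42\n");
}

static struct mcidev_sysfs_attribute my_grp_attrs[] = {
	{
		.attr = { .name = "foo", .mode = S_IRUGO },
		.show = my_foo_show,
	},
	{ .attr = { .name = NULL } }		/* end of attribute list */
};

static struct mcidev_sysfs_group my_group = {
	.name = "my_group",			/* creates mcX/my_group/ */
	.mcidev_attr = my_grp_attrs,
};

static struct mcidev_sysfs_attribute my_mci_attrs[] = {
	{ .grp = &my_group },			/* group entry: attr.name stays NULL */
	{ .attr = { .name = NULL } }		/* end of list */
};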
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 418b65f1a1da..c200c2fd43ea 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -557,6 +557,8 @@ static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+ debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
+
if (mcidev_attr->show)
return mcidev_attr->show(mem_ctl_info, buffer);
@@ -569,6 +571,8 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+ debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
+
if (mcidev_attr->store)
return mcidev_attr->store(mem_ctl_info, buffer, count);
@@ -726,28 +730,118 @@ void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci)
#define EDAC_DEVICE_SYMLINK "device"
+#define grp_to_mci(k) (container_of(k, struct mcidev_sysfs_group_kobj, kobj)->mci)
+
+/* show/store functions for the mci instance's group objects */
+static ssize_t inst_grp_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj);
+ struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+
+ debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
+
+ if (mcidev_attr->show)
+ return mcidev_attr->show(mem_ctl_info, buffer);
+
+ return -EIO;
+}
+
+static ssize_t inst_grp_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj);
+ struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+
+ debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
+
+ if (mcidev_attr->store)
+ return mcidev_attr->store(mem_ctl_info, buffer, count);
+
+ return -EIO;
+}
+
+/* No memory to release for this kobj */
+static void edac_inst_grp_release(struct kobject *kobj)
+{
+ struct mcidev_sysfs_group_kobj *grp;
+ struct mem_ctl_info *mci;
+
+ debugf1("%s()\n", __func__);
+
+ grp = container_of(kobj, struct mcidev_sysfs_group_kobj, kobj);
+ mci = grp->mci;
+
+ kobject_put(&mci->edac_mci_kobj);
+}
+
+/* Intermediate show/store table */
+static struct sysfs_ops inst_grp_ops = {
+ .show = inst_grp_show,
+ .store = inst_grp_store
+};
+
+/* the kobj_type instance for an instance group */
+static struct kobj_type ktype_inst_grp = {
+ .release = edac_inst_grp_release,
+ .sysfs_ops = &inst_grp_ops,
+};
+
+
/*
* edac_create_mci_instance_attributes
- * create MC driver specific attributes at the topmost level
- * directory of this mci instance.
+ * create MC driver specific attributes below a specified kobj.
+ * This routine calls itself recursively in order to create an entire
+ * object tree.
*/
-static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci)
+static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
+ struct mcidev_sysfs_attribute *sysfs_attrib,
+ struct kobject *kobj)
{
int err;
- struct mcidev_sysfs_attribute *sysfs_attrib;
- /* point to the start of the array and iterate over it
- * adding each attribute listed to this mci instance's kobject
- */
- sysfs_attrib = mci->mc_driver_sysfs_attributes;
+ debugf1("%s()\n", __func__);
+
+ while (sysfs_attrib) {
+ if (sysfs_attrib->grp) {
+ struct mcidev_sysfs_group_kobj *grp_kobj;
+
+ grp_kobj = kzalloc(sizeof(*grp_kobj), GFP_KERNEL);
+ if (!grp_kobj)
+ return -ENOMEM;
+
+ list_add_tail(&grp_kobj->list, &mci->grp_kobj_list);
+
+ grp_kobj->grp = sysfs_attrib->grp;
+ grp_kobj->mci = mci;
+
+ debugf0("%s() grp %s, mci %p\n", __func__,
+ sysfs_attrib->grp->name, mci);
+
+ err = kobject_init_and_add(&grp_kobj->kobj,
+ &ktype_inst_grp,
+ &mci->edac_mci_kobj,
+ sysfs_attrib->grp->name);
+ if (err)
+ return err;
+
+ err = edac_create_mci_instance_attributes(mci,
+ grp_kobj->grp->mcidev_attr,
+ &grp_kobj->kobj);
+
+ if (err)
+ return err;
+ } else if (sysfs_attrib->attr.name) {
+ debugf0("%s() file %s\n", __func__,
+ sysfs_attrib->attr.name);
+
+ err = sysfs_create_file(kobj, &sysfs_attrib->attr);
+ } else
+ break;
- while (sysfs_attrib && sysfs_attrib->attr.name) {
- err = sysfs_create_file(&mci->edac_mci_kobj,
- (struct attribute*) sysfs_attrib);
if (err) {
return err;
}
-
sysfs_attrib++;
}
@@ -759,21 +853,44 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci)
* remove MC driver specific attributes at the topmost level
* directory of this mci instance.
*/
-static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci)
+static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
+ struct mcidev_sysfs_attribute *sysfs_attrib,
+ struct kobject *kobj, int count)
{
- struct mcidev_sysfs_attribute *sysfs_attrib;
+ struct mcidev_sysfs_group_kobj *grp_kobj, *tmp;
- /* point to the start of the array and iterate over it
- * adding each attribute listed to this mci instance's kobject
- */
- sysfs_attrib = mci->mc_driver_sysfs_attributes;
+ debugf1("%s()\n", __func__);
- /* loop if there are attributes and until we hit a NULL entry */
- while (sysfs_attrib && sysfs_attrib->attr.name) {
- sysfs_remove_file(&mci->edac_mci_kobj,
- (struct attribute *) sysfs_attrib);
+ /*
+ * Loop while there are attributes, until we hit a NULL entry.
+ * Remove all the attributes first.
+ */
+ while (sysfs_attrib) {
+ if (sysfs_attrib->grp) {
+ list_for_each_entry(grp_kobj, &mci->grp_kobj_list,
+ list)
+ if (grp_kobj->grp == sysfs_attrib->grp)
+ edac_remove_mci_instance_attributes(mci,
+ grp_kobj->grp->mcidev_attr,
+ &grp_kobj->kobj, count + 1);
+ } else if (sysfs_attrib->attr.name) {
+ debugf0("%s() file %s\n", __func__,
+ sysfs_attrib->attr.name);
+ sysfs_remove_file(kobj, &sysfs_attrib->attr);
+ } else
+ break;
sysfs_attrib++;
}
+
+ /*
+ * Now that all attributes have been removed, it is safe to remove all groups
+ */
+ if (!count)
+ list_for_each_entry_safe(grp_kobj, tmp, &mci->grp_kobj_list,
+ list) {
+ debugf0("%s() grp %s\n", __func__, grp_kobj->grp->name);
+ kobject_put(&grp_kobj->kobj);
+ }
}
@@ -794,6 +911,8 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
+ INIT_LIST_HEAD(&mci->grp_kobj_list);
+
/* create a symlink for the device */
err = sysfs_create_link(kobj_mci, &mci->dev->kobj,
EDAC_DEVICE_SYMLINK);
@@ -806,7 +925,9 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
* then create them now for the driver.
*/
if (mci->mc_driver_sysfs_attributes) {
- err = edac_create_mci_instance_attributes(mci);
+ err = edac_create_mci_instance_attributes(mci,
+ mci->mc_driver_sysfs_attributes,
+ &mci->edac_mci_kobj);
if (err) {
debugf1("%s() failure to create mci attributes\n",
__func__);
@@ -841,7 +962,8 @@ fail1:
}
/* remove the mci instance's attributes, if any */
- edac_remove_mci_instance_attributes(mci);
+ edac_remove_mci_instance_attributes(mci,
+ mci->mc_driver_sysfs_attributes, &mci->edac_mci_kobj, 0);
/* remove the symlink */
sysfs_remove_link(kobj_mci, EDAC_DEVICE_SYMLINK);
@@ -875,8 +997,9 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
debugf0("%s() remove_mci_instance\n", __func__);
/* remove this mci instance's attribtes */
- edac_remove_mci_instance_attributes(mci);
-
+ edac_remove_mci_instance_attributes(mci,
+ mci->mc_driver_sysfs_attributes,
+ &mci->edac_mci_kobj, 0);
debugf0("%s() unregister this mci kobj\n", __func__);
/* unregister this instance's kobject */
diff --git a/drivers/edac/edac_mce.c b/drivers/edac/edac_mce.c
new file mode 100644
index 000000000000..9ccdc5b140e7
--- /dev/null
+++ b/drivers/edac/edac_mce.c
@@ -0,0 +1,61 @@
+/* Provides edac interface to mcelog events
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License version 2.
+ *
+ * Copyright (c) 2009 by:
+ * Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * Red Hat Inc. http://www.redhat.com
+ */
+
+#include <linux/module.h>
+#include <linux/edac_mce.h>
+#include <asm/mce.h>
+
+int edac_mce_enabled;
+EXPORT_SYMBOL_GPL(edac_mce_enabled);
+
+
+/*
+ * Extension interface
+ */
+
+static LIST_HEAD(edac_mce_list);
+static DEFINE_MUTEX(edac_mce_lock);
+
+int edac_mce_register(struct edac_mce *edac_mce)
+{
+ mutex_lock(&edac_mce_lock);
+ list_add_tail(&edac_mce->list, &edac_mce_list);
+ mutex_unlock(&edac_mce_lock);
+ return 0;
+}
+EXPORT_SYMBOL(edac_mce_register);
+
+void edac_mce_unregister(struct edac_mce *edac_mce)
+{
+ mutex_lock(&edac_mce_lock);
+ list_del(&edac_mce->list);
+ mutex_unlock(&edac_mce_lock);
+}
+EXPORT_SYMBOL(edac_mce_unregister);
+
+int edac_mce_parse(struct mce *mce)
+{
+ struct edac_mce *edac_mce;
+
+ list_for_each_entry(edac_mce, &edac_mce_list, list) {
+ if (edac_mce->check_error(edac_mce->priv, mce))
+ return 1;
+ }
+
+ /* Nobody queued the error */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(edac_mce_parse);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_DESCRIPTION("EDAC Driver for mcelog captured errors");
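For reference, a minimal sketch of how a memory controller driver is expected to hook into this glue layer (assuming the struct edac_mce layout from the matching linux/edac_mce.h header, with priv and check_error members; my_check_error and my_register_with_edac_mce are hypothetical names): the driver fills in a private pointer and a callback, registers the structure, and edac_mce_parse() then offers every mcelog event to that callback.

#include <linux/edac_mce.h>
#include "edac_core.h"

/* hypothetical callback: return non-zero if the event was consumed here */
static int my_check_error(void *priv, struct mce *mce)
{
	struct mem_ctl_info *mci = priv;

	/* a real driver would queue the event and match it against its
	 * own registers; see the i7core driver later in this series */
	return mci ? 1 : 0;
}

static struct edac_mce my_edac_mce;

static int my_register_with_edac_mce(struct mem_ctl_info *mci)
{
	my_edac_mce.priv = mci;
	my_edac_mce.check_error = my_check_error;
	return edac_mce_register(&my_edac_mce);
}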
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index adc10a2ac5f6..996c1bdb5a34 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -774,7 +774,7 @@ static void i5000_clear_error(struct mem_ctl_info *mci)
static void i5000_check_error(struct mem_ctl_info *mci)
{
struct i5000_error_info info;
- debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
i5000_get_error_info(mci, &info);
i5000_process_error_info(mci, &info, 1);
}
@@ -1353,8 +1353,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
int num_dimms_per_channel;
int num_csrows;
- debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n",
- __func__,
+ debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
+ __FILE__, __func__,
pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
@@ -1389,7 +1389,7 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
return -ENOMEM;
kobject_get(&mci->edac_mci_kobj);
- debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+ debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
mci->dev = &pdev->dev; /* record ptr to the generic device */
@@ -1432,8 +1432,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
/* add this new MC control structure to EDAC's list of MCs */
if (edac_mc_add_mc(mci)) {
- debugf0("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n", __func__);
+ debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
+ __FILE__, __func__);
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
@@ -1478,7 +1478,7 @@ static int __devinit i5000_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("MC: %s: %s()\n", __FILE__, __func__);
/* wake up device */
rc = pci_enable_device(pdev);
@@ -1497,7 +1497,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0(__FILE__ ": %s()\n", __func__);
+ debugf0("%s: %s()\n", __FILE__, __func__);
if (i5000_pci)
edac_pci_release_generic_ctl(i5000_pci);
@@ -1544,7 +1544,7 @@ static int __init i5000_init(void)
{
int pci_rc;
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ debugf2("MC: %s: %s()\n", __FILE__, __func__);
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -1560,7 +1560,7 @@ static int __init i5000_init(void)
*/
static void __exit i5000_exit(void)
{
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ debugf2("MC: %s: %s()\n", __FILE__, __func__);
pci_unregister_driver(&i5000_driver);
}
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index f99d10655ed4..010c1d6526f5 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -694,7 +694,7 @@ static void i5400_clear_error(struct mem_ctl_info *mci)
static void i5400_check_error(struct mem_ctl_info *mci)
{
struct i5400_error_info info;
- debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
i5400_get_error_info(mci, &info);
i5400_process_error_info(mci, &info);
}
@@ -1227,8 +1227,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
if (dev_idx >= ARRAY_SIZE(i5400_devs))
return -EINVAL;
- debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n",
- __func__,
+ debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
+ __FILE__, __func__,
pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
@@ -1256,7 +1256,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
if (mci == NULL)
return -ENOMEM;
- debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+ debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
mci->dev = &pdev->dev; /* record ptr to the generic device */
@@ -1299,8 +1299,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
/* add this new MC control structure to EDAC's list of MCs */
if (edac_mc_add_mc(mci)) {
- debugf0("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n", __func__);
+ debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
+ __FILE__, __func__);
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
@@ -1344,7 +1344,7 @@ static int __devinit i5400_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("MC: %s: %s()\n", __FILE__, __func__);
/* wake up device */
rc = pci_enable_device(pdev);
@@ -1363,7 +1363,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0(__FILE__ ": %s()\n", __func__);
+ debugf0("%s: %s()\n", __FILE__, __func__);
if (i5400_pci)
edac_pci_release_generic_ctl(i5400_pci);
@@ -1409,7 +1409,7 @@ static int __init i5400_init(void)
{
int pci_rc;
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ debugf2("MC: %s: %s()\n", __FILE__, __func__);
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -1425,7 +1425,7 @@ static int __init i5400_init(void)
*/
static void __exit i5400_exit(void)
{
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ debugf2("MC: %s: %s()\n", __FILE__, __func__);
pci_unregister_driver(&i5400_driver);
}
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
new file mode 100644
index 000000000000..6b8b7b41ec5f
--- /dev/null
+++ b/drivers/edac/i7core_edac.c
@@ -0,0 +1,2078 @@
+/* Intel i7 core/Nehalem Memory Controller kernel module
+ *
+ * This driver supports the memory controllers found on the Intel
+ * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
+ * Xeon 55xx and Xeon 56xx, also known as Nehalem, Nehalem-EP, Lynnfield
+ * and Westmere-EP.
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License version 2 only.
+ *
+ * Copyright (c) 2009-2010 by:
+ * Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * Red Hat Inc. http://www.redhat.com
+ *
+ * Forked and adapted from the i5400_edac driver
+ *
+ * Based on the following public Intel datasheets:
+ * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
+ * Datasheet, Volume 2:
+ * http://download.intel.com/design/processor/datashts/320835.pdf
+ * Intel Xeon Processor 5500 Series Datasheet Volume 2
+ * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
+ * also available at:
+ * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/edac.h>
+#include <linux/mmzone.h>
+#include <linux/edac_mce.h>
+#include <linux/smp.h>
+#include <asm/processor.h>
+
+#include "edac_core.h"
+
+/*
+ * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
+ * registers start at bus 255, and are not reported by BIOS.
+ * We currently find devices with only 2 sockets. In order to support
+ * systems with more QPI (QuickPath Interconnect) sockets, just increment
+ * this number.
+ */
+#define MAX_SOCKET_BUSES 2
+
+
+/*
+ * Alter this version for the module when modifications are made
+ */
+#define I7CORE_REVISION " Ver: 1.0.0 " __DATE__
+#define EDAC_MOD_STR "i7core_edac"
+
+/*
+ * Debug macros
+ */
+#define i7core_printk(level, fmt, arg...) \
+ edac_printk(level, "i7core", fmt, ##arg)
+
+#define i7core_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
+
+/*
+ * i7core Memory Controller Registers
+ */
+
+ /* OFFSETS for Device 0 Function 0 */
+
+#define MC_CFG_CONTROL 0x90
+
+ /* OFFSETS for Device 3 Function 0 */
+
+#define MC_CONTROL 0x48
+#define MC_STATUS 0x4c
+#define MC_MAX_DOD 0x64
+
+/*
+ * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet:
+ * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
+ */
+
+#define MC_TEST_ERR_RCV1 0x60
+ #define DIMM2_COR_ERR(r) ((r) & 0x7fff)
+
+#define MC_TEST_ERR_RCV0 0x64
+ #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff)
+ #define DIMM0_COR_ERR(r) ((r) & 0x7fff)
+
+/* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
+#define MC_COR_ECC_CNT_0 0x80
+#define MC_COR_ECC_CNT_1 0x84
+#define MC_COR_ECC_CNT_2 0x88
+#define MC_COR_ECC_CNT_3 0x8c
+#define MC_COR_ECC_CNT_4 0x90
+#define MC_COR_ECC_CNT_5 0x94
+
+#define DIMM_TOP_COR_ERR(r) (((r) >> 16) & 0x7fff)
+#define DIMM_BOT_COR_ERR(r) ((r) & 0x7fff)
+
+
+ /* OFFSETS for Devices 4,5 and 6 Function 0 */
+
+#define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
+ #define THREE_DIMMS_PRESENT (1 << 24)
+ #define SINGLE_QUAD_RANK_PRESENT (1 << 23)
+ #define QUAD_RANK_PRESENT (1 << 22)
+ #define REGISTERED_DIMM (1 << 15)
+
+#define MC_CHANNEL_MAPPER 0x60
+ #define RDLCH(r, ch) ((((r) >> (3 + (ch * 6))) & 0x07) - 1)
+ #define WRLCH(r, ch) ((((r) >> (ch * 6)) & 0x07) - 1)
+
+#define MC_CHANNEL_RANK_PRESENT 0x7c
+ #define RANK_PRESENT_MASK 0xffff
+
+#define MC_CHANNEL_ADDR_MATCH 0xf0
+#define MC_CHANNEL_ERROR_MASK 0xf8
+#define MC_CHANNEL_ERROR_INJECT 0xfc
+ #define INJECT_ADDR_PARITY 0x10
+ #define INJECT_ECC 0x08
+ #define MASK_CACHELINE 0x06
+ #define MASK_FULL_CACHELINE 0x06
+ #define MASK_MSB32_CACHELINE 0x04
+ #define MASK_LSB32_CACHELINE 0x02
+ #define NO_MASK_CACHELINE 0x00
+ #define REPEAT_EN 0x01
+
+ /* OFFSETS for Devices 4,5 and 6 Function 1 */
+
+#define MC_DOD_CH_DIMM0 0x48
+#define MC_DOD_CH_DIMM1 0x4c
+#define MC_DOD_CH_DIMM2 0x50
+ #define RANKOFFSET_MASK ((1 << 12) | (1 << 11) | (1 << 10))
+ #define RANKOFFSET(x) ((x & RANKOFFSET_MASK) >> 10)
+ #define DIMM_PRESENT_MASK (1 << 9)
+ #define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9)
+ #define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7))
+ #define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7)
+ #define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5))
+ #define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5)
+ #define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2))
+ #define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2)
+ #define MC_DOD_NUMCOL_MASK 3
+ #define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK)
+
+#define MC_RANK_PRESENT 0x7c
+
+#define MC_SAG_CH_0 0x80
+#define MC_SAG_CH_1 0x84
+#define MC_SAG_CH_2 0x88
+#define MC_SAG_CH_3 0x8c
+#define MC_SAG_CH_4 0x90
+#define MC_SAG_CH_5 0x94
+#define MC_SAG_CH_6 0x98
+#define MC_SAG_CH_7 0x9c
+
+#define MC_RIR_LIMIT_CH_0 0x40
+#define MC_RIR_LIMIT_CH_1 0x44
+#define MC_RIR_LIMIT_CH_2 0x48
+#define MC_RIR_LIMIT_CH_3 0x4C
+#define MC_RIR_LIMIT_CH_4 0x50
+#define MC_RIR_LIMIT_CH_5 0x54
+#define MC_RIR_LIMIT_CH_6 0x58
+#define MC_RIR_LIMIT_CH_7 0x5C
+#define MC_RIR_LIMIT_MASK ((1 << 10) - 1)
+
+#define MC_RIR_WAY_CH 0x80
+ #define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7)
+ #define MC_RIR_WAY_RANK_MASK 0x7
+
+/*
+ * i7core structs
+ */
+
+#define NUM_CHANS 3
+#define MAX_DIMMS 3 /* Max DIMMS per channel */
+#define MAX_MCR_FUNC 4
+#define MAX_CHAN_FUNC 3
+
+struct i7core_info {
+ u32 mc_control;
+ u32 mc_status;
+ u32 max_dod;
+ u32 ch_map;
+};
+
+
+struct i7core_inject {
+ int enable;
+
+ u32 section;
+ u32 type;
+ u32 eccmask;
+
+ /* Error address mask */
+ int channel, dimm, rank, bank, page, col;
+};
+
+struct i7core_channel {
+ u32 ranks;
+ u32 dimms;
+};
+
+struct pci_id_descr {
+ int dev;
+ int func;
+ int dev_id;
+ int optional;
+};
+
+struct pci_id_table {
+ struct pci_id_descr *descr;
+ int n_devs;
+};
+
+struct i7core_dev {
+ struct list_head list;
+ u8 socket;
+ struct pci_dev **pdev;
+ int n_devs;
+ struct mem_ctl_info *mci;
+};
+
+struct i7core_pvt {
+ struct pci_dev *pci_noncore;
+ struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1];
+ struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];
+
+ struct i7core_dev *i7core_dev;
+
+ struct i7core_info info;
+ struct i7core_inject inject;
+ struct i7core_channel channel[NUM_CHANS];
+
+ int channels; /* Number of active channels */
+
+ int ce_count_available;
+ int csrow_map[NUM_CHANS][MAX_DIMMS];
+
+ /* ECC corrected error counts per udimm */
+ unsigned long udimm_ce_count[MAX_DIMMS];
+ int udimm_last_ce_count[MAX_DIMMS];
+ /* ECC corrected error counts per rdimm */
+ unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
+ int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];
+
+ unsigned int is_registered;
+
+ /* mcelog glue */
+ struct edac_mce edac_mce;
+
+ /* Fifo double buffers */
+ struct mce mce_entry[MCE_LOG_LEN];
+ struct mce mce_outentry[MCE_LOG_LEN];
+
+ /* Fifo in/out counters */
+ unsigned mce_in, mce_out;
+
+ /* Counter of errors that could not be queued (fifo overrun) */
+ unsigned mce_overrun;
+};
+
+/* Static vars */
+static LIST_HEAD(i7core_edac_list);
+static DEFINE_MUTEX(i7core_edac_lock);
+
+#define PCI_DESCR(device, function, device_id) \
+ .dev = (device), \
+ .func = (function), \
+ .dev_id = (device_id)
+
+struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
+ /* Memory controller */
+ { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
+ { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
+ /* Exists only for RDIMM */
+ { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
+ { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
+
+ /* Channel 0 */
+ { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
+ { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
+ { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
+ { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) },
+
+ /* Channel 1 */
+ { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
+ { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
+ { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
+ { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) },
+
+ /* Channel 2 */
+ { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
+ { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
+ { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
+ { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },
+
+ /* Generic Non-core registers */
+ /*
+ * This is the PCI device on i7core and on Xeon 35xx (8086:2c41).
+ * On Xeon 55xx, however, it has a different id (8086:2c40), so
+ * the probing code needs to test for the other address in case
+ * this one fails.
+ */
+ { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },
+
+};
+
+struct pci_id_descr pci_dev_descr_lynnfield[] = {
+ { PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) },
+ { PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) },
+ { PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) },
+
+ { PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
+ { PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
+ { PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
+ { PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC) },
+
+ { PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
+ { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
+ { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
+ { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) },
+
+ /*
+ * This PCI device has an alternate address on some
+ * processors like Core i7 860
+ */
+ { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) },
+};
+
+struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
+ /* Memory controller */
+ { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) },
+ { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) },
+ /* Exists only for RDIMM */
+ { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1 },
+ { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },
+
+ /* Channel 0 */
+ { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
+ { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
+ { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
+ { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2) },
+
+ /* Channel 1 */
+ { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
+ { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
+ { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
+ { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2) },
+
+ /* Channel 2 */
+ { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
+ { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
+ { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
+ { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) },
+
+ /* Generic Non-core registers */
+ { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) },
+
+};
+
+#define PCI_ID_TABLE_ENTRY(A) { A, ARRAY_SIZE(A) }
+struct pci_id_table pci_dev_table[] = {
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
+};
+
+/*
+ * pci_device_id table for which devices we are looking for
+ */
+static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
+ {0,} /* 0 terminated list. */
+};
+
+static struct edac_pci_ctl_info *i7core_pci;
+
+/****************************************************************************
+ Ancillary status routines
+ ****************************************************************************/
+
+ /* MC_CONTROL bits */
+#define CH_ACTIVE(pvt, ch) ((pvt)->info.mc_control & (1 << (8 + ch)))
+#define ECCx8(pvt) ((pvt)->info.mc_control & (1 << 1))
+
+ /* MC_STATUS bits */
+#define ECC_ENABLED(pvt) ((pvt)->info.mc_status & (1 << 4))
+#define CH_DISABLED(pvt, ch) ((pvt)->info.mc_status & (1 << ch))
+
+ /* MC_MAX_DOD read functions */
+static inline int numdimms(u32 dimms)
+{
+ return (dimms & 0x3) + 1;
+}
+
+static inline int numrank(u32 rank)
+{
+ static int ranks[4] = { 1, 2, 4, -EINVAL };
+
+ return ranks[rank & 0x3];
+}
+
+static inline int numbank(u32 bank)
+{
+ static int banks[4] = { 4, 8, 16, -EINVAL };
+
+ return banks[bank & 0x3];
+}
+
+static inline int numrow(u32 row)
+{
+ static int rows[8] = {
+ 1 << 12, 1 << 13, 1 << 14, 1 << 15,
+ 1 << 16, -EINVAL, -EINVAL, -EINVAL,
+ };
+
+ return rows[row & 0x7];
+}
+
+static inline int numcol(u32 col)
+{
+ static int cols[8] = {
+ 1 << 10, 1 << 11, 1 << 12, -EINVAL,
+ };
+ return cols[col & 0x3];
+}
+
+static struct i7core_dev *get_i7core_dev(u8 socket)
+{
+ struct i7core_dev *i7core_dev;
+
+ list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
+ if (i7core_dev->socket == socket)
+ return i7core_dev;
+ }
+
+ return NULL;
+}
+
+/****************************************************************************
+ Memory check routines
+ ****************************************************************************/
+static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
+ unsigned func)
+{
+ struct i7core_dev *i7core_dev = get_i7core_dev(socket);
+ int i;
+
+ if (!i7core_dev)
+ return NULL;
+
+ for (i = 0; i < i7core_dev->n_devs; i++) {
+ if (!i7core_dev->pdev[i])
+ continue;
+
+ if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
+ PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
+ return i7core_dev->pdev[i];
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * i7core_get_active_channels() - gets the number of channels and csrows
+ * @socket: Quick Path Interconnect socket
+ * @channels: Number of channels that will be returned
+ * @csrows: Number of csrows found
+ *
+ * Since the EDAC core needs to know in advance the number of available
+ * channels and csrows in order to allocate memory for them, we need to run
+ * two similar steps. The first step, implemented in this function, counts
+ * the number of csrows/channels present on one socket. This is used to
+ * properly size the mci components.
+ *
+ * It should be noted that none of the currently available datasheets explain
+ * or even mention how csrows are seen by the memory controller, so we need
+ * to add a fake description for csrows. This driver therefore maps one
+ * DIMM to one csrow.
+ */
+static int i7core_get_active_channels(u8 socket, unsigned *channels,
+ unsigned *csrows)
+{
+ struct pci_dev *pdev = NULL;
+ int i, j;
+ u32 status, control;
+
+ *channels = 0;
+ *csrows = 0;
+
+ pdev = get_pdev_slot_func(socket, 3, 0);
+ if (!pdev) {
+ i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
+ socket);
+ return -ENODEV;
+ }
+
+ /* Device 3 function 0 reads */
+ pci_read_config_dword(pdev, MC_STATUS, &status);
+ pci_read_config_dword(pdev, MC_CONTROL, &control);
+
+ for (i = 0; i < NUM_CHANS; i++) {
+ u32 dimm_dod[3];
+ /* Check if the channel is active */
+ if (!(control & (1 << (8 + i))))
+ continue;
+
+ /* Check if the channel is disabled */
+ if (status & (1 << i))
+ continue;
+
+ pdev = get_pdev_slot_func(socket, i + 4, 1);
+ if (!pdev) {
+ i7core_printk(KERN_ERR, "Couldn't find socket %d "
+ "fn %d.%d!!!\n",
+ socket, i + 4, 1);
+ return -ENODEV;
+ }
+ /* Devices 4-6 function 1 */
+ pci_read_config_dword(pdev,
+ MC_DOD_CH_DIMM0, &dimm_dod[0]);
+ pci_read_config_dword(pdev,
+ MC_DOD_CH_DIMM1, &dimm_dod[1]);
+ pci_read_config_dword(pdev,
+ MC_DOD_CH_DIMM2, &dimm_dod[2]);
+
+ (*channels)++;
+
+ for (j = 0; j < 3; j++) {
+ if (!DIMM_PRESENT(dimm_dod[j]))
+ continue;
+ (*csrows)++;
+ }
+ }
+
+ debugf0("Number of active channels on socket %d: %d\n",
+ socket, *channels);
+
+ return 0;
+}
+
+static int get_dimm_config(struct mem_ctl_info *mci, int *csrow)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ struct csrow_info *csr;
+ struct pci_dev *pdev;
+ int i, j;
+ unsigned long last_page = 0;
+ enum edac_type mode;
+ enum mem_type mtype;
+
+ /* Get data from the MC register, function 0 */
+ pdev = pvt->pci_mcr[0];
+ if (!pdev)
+ return -ENODEV;
+
+ /* Device 3 function 0 reads */
+ pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
+ pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
+ pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
+ pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
+
+ debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
+ pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
+ pvt->info.max_dod, pvt->info.ch_map);
+
+ if (ECC_ENABLED(pvt)) {
+ debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
+ if (ECCx8(pvt))
+ mode = EDAC_S8ECD8ED;
+ else
+ mode = EDAC_S4ECD4ED;
+ } else {
+ debugf0("ECC disabled\n");
+ mode = EDAC_NONE;
+ }
+
+ /* FIXME: need to handle the error codes */
+ debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
+ "x%x x 0x%x\n",
+ numdimms(pvt->info.max_dod),
+ numrank(pvt->info.max_dod >> 2),
+ numbank(pvt->info.max_dod >> 4),
+ numrow(pvt->info.max_dod >> 6),
+ numcol(pvt->info.max_dod >> 9));
+
+ for (i = 0; i < NUM_CHANS; i++) {
+ u32 data, dimm_dod[3], value[8];
+
+ if (!pvt->pci_ch[i][0])
+ continue;
+
+ if (!CH_ACTIVE(pvt, i)) {
+ debugf0("Channel %i is not active\n", i);
+ continue;
+ }
+ if (CH_DISABLED(pvt, i)) {
+ debugf0("Channel %i is disabled\n", i);
+ continue;
+ }
+
+ /* Devices 4-6 function 0 */
+ pci_read_config_dword(pvt->pci_ch[i][0],
+ MC_CHANNEL_DIMM_INIT_PARAMS, &data);
+
+ pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
+ 4 : 2;
+
+ if (data & REGISTERED_DIMM)
+ mtype = MEM_RDDR3;
+ else
+ mtype = MEM_DDR3;
+#if 0
+ if (data & THREE_DIMMS_PRESENT)
+ pvt->channel[i].dimms = 3;
+ else if (data & SINGLE_QUAD_RANK_PRESENT)
+ pvt->channel[i].dimms = 1;
+ else
+ pvt->channel[i].dimms = 2;
+#endif
+
+ /* Devices 4-6 function 1 */
+ pci_read_config_dword(pvt->pci_ch[i][1],
+ MC_DOD_CH_DIMM0, &dimm_dod[0]);
+ pci_read_config_dword(pvt->pci_ch[i][1],
+ MC_DOD_CH_DIMM1, &dimm_dod[1]);
+ pci_read_config_dword(pvt->pci_ch[i][1],
+ MC_DOD_CH_DIMM2, &dimm_dod[2]);
+
+ debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
+ "%d ranks, %cDIMMs\n",
+ i,
+ RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
+ data,
+ pvt->channel[i].ranks,
+ (data & REGISTERED_DIMM) ? 'R' : 'U');
+
+ for (j = 0; j < 3; j++) {
+ u32 banks, ranks, rows, cols;
+ u32 size, npages;
+
+ if (!DIMM_PRESENT(dimm_dod[j]))
+ continue;
+
+ banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
+ ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
+ rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
+ cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
+
+ /* DDR3 has 8 I/O banks */
+ size = (rows * cols * banks * ranks) >> (20 - 3);
+
+ pvt->channel[i].dimms++;
+
+ debugf0("\tdimm %d %d Mb offset: %x, "
+ "bank: %d, rank: %d, row: %#x, col: %#x\n",
+ j, size,
+ RANKOFFSET(dimm_dod[j]),
+ banks, ranks, rows, cols);
+
+#if PAGE_SHIFT > 20
+ npages = size >> (PAGE_SHIFT - 20);
+#else
+ npages = size << (20 - PAGE_SHIFT);
+#endif
+
+ csr = &mci->csrows[*csrow];
+ csr->first_page = last_page + 1;
+ last_page += npages;
+ csr->last_page = last_page;
+ csr->nr_pages = npages;
+
+ csr->page_mask = 0;
+ csr->grain = 8;
+ csr->csrow_idx = *csrow;
+ csr->nr_channels = 1;
+
+ csr->channels[0].chan_idx = i;
+ csr->channels[0].ce_count = 0;
+
+ pvt->csrow_map[i][j] = *csrow;
+
+ switch (banks) {
+ case 4:
+ csr->dtype = DEV_X4;
+ break;
+ case 8:
+ csr->dtype = DEV_X8;
+ break;
+ case 16:
+ csr->dtype = DEV_X16;
+ break;
+ default:
+ csr->dtype = DEV_UNKNOWN;
+ }
+
+ csr->edac_mode = mode;
+ csr->mtype = mtype;
+
+ (*csrow)++;
+ }
+
+ pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
+ pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
+ pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
+ pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
+ pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
+ pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
+ pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
+ pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
+ debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
+ for (j = 0; j < 8; j++)
+ debugf1("\t\t%#x\t%#x\t%#x\n",
+ (value[j] >> 27) & 0x1,
+ (value[j] >> 24) & 0x7,
+ (value[j] & ((1 << 24) - 1)));
+ }
+
+ return 0;
+}
+
+/****************************************************************************
+ Error insertion routines
+ ****************************************************************************/
+
+/* The i7core has independent error injection features per channel.
+ However, to keep the code simpler, we don't allow enabling error injection
+ on more than one channel.
+ Also, since a change to an inject parameter is only applied at enable time,
+ we disable error injection on every write to the sysfs nodes that control
+ the error code injection.
+ */
+static int disable_inject(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+
+ pvt->inject.enable = 0;
+
+ if (!pvt->pci_ch[pvt->inject.channel][0])
+ return -ENODEV;
+
+ pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ERROR_INJECT, 0);
+
+ return 0;
+}
+
+/*
+ * i7core inject inject.section
+ *
+ * accept and store error injection inject.section value
+ * bit 0 - refers to the lower 32-byte half cacheline
+ * bit 1 - refers to the upper 32-byte half cacheline
+ */
+static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int rc;
+
+ if (pvt->inject.enable)
+ disable_inject(mci);
+
+ rc = strict_strtoul(data, 10, &value);
+ if ((rc < 0) || (value > 3))
+ return -EIO;
+
+ pvt->inject.section = (u32) value;
+ return count;
+}
+
+static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ return sprintf(data, "0x%08x\n", pvt->inject.section);
+}
+
+/*
+ * i7core inject.type
+ *
+ * accept and store error injection inject.type value
+ * bit 0 - repeat enable - Enable error repetition
+ * bit 1 - inject ECC error
+ * bit 2 - inject parity error
+ */
+static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int rc;
+
+ if (pvt->inject.enable)
+ disable_inject(mci);
+
+ rc = strict_strtoul(data, 10, &value);
+ if ((rc < 0) || (value > 7))
+ return -EIO;
+
+ pvt->inject.type = (u32) value;
+ return count;
+}
+
+static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ return sprintf(data, "0x%08x\n", pvt->inject.type);
+}
+
+/*
+ * i7core_inject_inject.eccmask_store
+ *
+ * The type of error (UE/CE) will depend on the inject.eccmask value:
+ * Any bits set to a 1 will flip the corresponding ECC bit
+ * Correctable errors can be injected by flipping 1 bit or the bits within
+ * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
+ * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
+ * uncorrectable error to be injected.
+ */
+static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int rc;
+
+ if (pvt->inject.enable)
+ disable_inject(mci);
+
+ rc = strict_strtoul(data, 10, &value);
+ if (rc < 0)
+ return -EIO;
+
+ pvt->inject.eccmask = (u32) value;
+ return count;
+}
+
+static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
+}
+
+/*
+ * i7core_addrmatch
+ *
+ * The address match attributes (channel, dimm, rank, bank, page, col) select
+ * the memory position that will receive the injected error. Writing "any" to
+ * one of them makes the memory controller ignore that field when matching.
+ */
+
+#define DECLARE_ADDR_MATCH(param, limit) \
+static ssize_t i7core_inject_store_##param( \
+ struct mem_ctl_info *mci, \
+ const char *data, size_t count) \
+{ \
+ struct i7core_pvt *pvt; \
+ long value; \
+ int rc; \
+ \
+ debugf1("%s()\n", __func__); \
+ pvt = mci->pvt_info; \
+ \
+ if (pvt->inject.enable) \
+ disable_inject(mci); \
+ \
+ if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
+ value = -1; \
+ else { \
+ rc = strict_strtoul(data, 10, &value); \
+ if ((rc < 0) || (value >= limit)) \
+ return -EIO; \
+ } \
+ \
+ pvt->inject.param = value; \
+ \
+ return count; \
+} \
+ \
+static ssize_t i7core_inject_show_##param( \
+ struct mem_ctl_info *mci, \
+ char *data) \
+{ \
+ struct i7core_pvt *pvt; \
+ \
+ pvt = mci->pvt_info; \
+ debugf1("%s() pvt=%p\n", __func__, pvt); \
+ if (pvt->inject.param < 0) \
+ return sprintf(data, "any\n"); \
+ else \
+ return sprintf(data, "%d\n", pvt->inject.param);\
+}
+
+#define ATTR_ADDR_MATCH(param) \
+ { \
+ .attr = { \
+ .name = #param, \
+ .mode = (S_IRUGO | S_IWUSR) \
+ }, \
+ .show = i7core_inject_show_##param, \
+ .store = i7core_inject_store_##param, \
+ }
+
+DECLARE_ADDR_MATCH(channel, 3);
+DECLARE_ADDR_MATCH(dimm, 3);
+DECLARE_ADDR_MATCH(rank, 4);
+DECLARE_ADDR_MATCH(bank, 32);
+DECLARE_ADDR_MATCH(page, 0x10000);
+DECLARE_ADDR_MATCH(col, 0x4000);
+
+static int write_and_test(struct pci_dev *dev, int where, u32 val)
+{
+ u32 read;
+ int count;
+
+ debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
+ dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
+ where, val);
+
+ for (count = 0; count < 10; count++) {
+ if (count)
+ msleep(100);
+ pci_write_config_dword(dev, where, val);
+ pci_read_config_dword(dev, where, &read);
+
+ if (read == val)
+ return 0;
+ }
+
+ i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
+ "write=%08x. Read=%08x\n",
+ dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
+ where, val, read);
+
+ return -EINVAL;
+}
+
+/*
+ * This routine prepares the Memory Controller for error injection.
+ * The error will be injected when some process tries to write to the
+ * memory that matches the given criteria.
+ * The criteria can be set in terms of a mask where dimm, rank, bank, page
+ * and col can be specified.
+ * A -1 value for any of the mask items will make the MCU ignore
+ * that matching criteria for error injection.
+ *
+ * It should be noted that the error will only happen after a write operation
+ * to a memory address that matches the condition. If REPEAT_EN is not enabled
+ * in the inject mask, just one error will be produced. Otherwise, errors will
+ * keep being injected until the inject mask is cleared.
+ *
+ * FIXME: This routine assumes that the MAXNUMDIMMS value of MC_MAX_DOD
+ * is reliable enough to check if the MC is using the
+ * three channels. However, this is not clear in the datasheet.
+ */
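+/*
+ * Example userspace usage (illustrative only; the sysfs base path and the
+ * mc instance number depend on the system; values are decimal because the
+ * store routines parse them with strict_strtoul(..., 10, ...)):
+ *
+ *	echo 2 > /sys/devices/system/edac/mc/mc0/inject_addrmatch/channel
+ *	echo any > /sys/devices/system/edac/mc/mc0/inject_addrmatch/dimm
+ *	echo 2 > /sys/devices/system/edac/mc/mc0/inject_type
+ *	echo 1 > /sys/devices/system/edac/mc/mc0/inject_enable
+ */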
+static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ u32 injectmask;
+ u64 mask = 0;
+ int rc;
+ long enable;
+
+ if (!pvt->pci_ch[pvt->inject.channel][0])
+ return 0;
+
+ rc = strict_strtoul(data, 10, &enable);
+ if ((rc < 0))
+ return 0;
+
+ if (enable) {
+ pvt->inject.enable = 1;
+ } else {
+ disable_inject(mci);
+ return count;
+ }
+
+ /* Sets pvt->inject.dimm mask */
+ if (pvt->inject.dimm < 0)
+ mask |= 1LL << 41;
+ else {
+ if (pvt->channel[pvt->inject.channel].dimms > 2)
+ mask |= (pvt->inject.dimm & 0x3LL) << 35;
+ else
+ mask |= (pvt->inject.dimm & 0x1LL) << 36;
+ }
+
+ /* Sets pvt->inject.rank mask */
+ if (pvt->inject.rank < 0)
+ mask |= 1LL << 40;
+ else {
+ if (pvt->channel[pvt->inject.channel].dimms > 2)
+ mask |= (pvt->inject.rank & 0x1LL) << 34;
+ else
+ mask |= (pvt->inject.rank & 0x3LL) << 34;
+ }
+
+ /* Sets pvt->inject.bank mask */
+ if (pvt->inject.bank < 0)
+ mask |= 1LL << 39;
+ else
+ mask |= (pvt->inject.bank & 0x15LL) << 30;
+
+ /* Sets pvt->inject.page mask */
+ if (pvt->inject.page < 0)
+ mask |= 1LL << 38;
+ else
+ mask |= (pvt->inject.page & 0xffff) << 14;
+
+ /* Sets pvt->inject.column mask */
+ if (pvt->inject.col < 0)
+ mask |= 1LL << 37;
+ else
+ mask |= (pvt->inject.col & 0x3fff);
+
+ /*
+ * bit 0: REPEAT_EN
+ * bits 1-2: MASK_HALF_CACHELINE
+ * bit 3: INJECT_ECC
+ * bit 4: INJECT_ADDR_PARITY
+ */
+
+ injectmask = (pvt->inject.type & 1) |
+ (pvt->inject.section & 0x3) << 1 |
+ (pvt->inject.type & 0x6) << (3 - 1);
+
+ /* Unlock writes to registers - this register is write only */
+ pci_write_config_dword(pvt->pci_noncore,
+ MC_CFG_CONTROL, 0x2);
+
+ write_and_test(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ADDR_MATCH, mask);
+ write_and_test(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);
+
+ write_and_test(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);
+
+ write_and_test(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ERROR_INJECT, injectmask);
+
+ /*
+ * This is something undocumented, based on my tests
+ * Without writing 8 to this register, errors aren't injected. Not sure
+ * why.
+ */
+ pci_write_config_dword(pvt->pci_noncore,
+ MC_CFG_CONTROL, 8);
+
+ debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
+ " inject 0x%08x\n",
+ mask, pvt->inject.eccmask, injectmask);
+
+
+ return count;
+}
+
+static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ u32 injectmask;
+
+ if (!pvt->pci_ch[pvt->inject.channel][0])
+ return 0;
+
+ pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ERROR_INJECT, &injectmask);
+
+ debugf0("Inject error read: 0x%018x\n", injectmask);
+
+ if (injectmask & 0x0c)
+ pvt->inject.enable = 1;
+
+ return sprintf(data, "%d\n", pvt->inject.enable);
+}
+
+#define DECLARE_COUNTER(param) \
+static ssize_t i7core_show_counter_##param( \
+ struct mem_ctl_info *mci, \
+ char *data) \
+{ \
+ struct i7core_pvt *pvt = mci->pvt_info; \
+ \
+ debugf1("%s() \n", __func__); \
+ if (!pvt->ce_count_available || (pvt->is_registered)) \
+ return sprintf(data, "data unavailable\n"); \
+ return sprintf(data, "%lu\n", \
+ pvt->udimm_ce_count[param]); \
+}
+
+#define ATTR_COUNTER(param) \
+ { \
+ .attr = { \
+ .name = __stringify(udimm##param), \
+ .mode = (S_IRUGO | S_IWUSR) \
+ }, \
+ .show = i7core_show_counter_##param \
+ }
+
+DECLARE_COUNTER(0);
+DECLARE_COUNTER(1);
+DECLARE_COUNTER(2);
+
+/*
+ * Sysfs struct
+ */
+
+
+static struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
+ ATTR_ADDR_MATCH(channel),
+ ATTR_ADDR_MATCH(dimm),
+ ATTR_ADDR_MATCH(rank),
+ ATTR_ADDR_MATCH(bank),
+ ATTR_ADDR_MATCH(page),
+ ATTR_ADDR_MATCH(col),
+ { .attr = { .name = NULL } }
+};
+
+static struct mcidev_sysfs_group i7core_inject_addrmatch = {
+ .name = "inject_addrmatch",
+ .mcidev_attr = i7core_addrmatch_attrs,
+};
+
+static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
+ ATTR_COUNTER(0),
+ ATTR_COUNTER(1),
+ ATTR_COUNTER(2),
+};
+
+static struct mcidev_sysfs_group i7core_udimm_counters = {
+ .name = "all_channel_counts",
+ .mcidev_attr = i7core_udimm_counters_attrs,
+};
+
+static struct mcidev_sysfs_attribute i7core_sysfs_attrs[] = {
+ {
+ .attr = {
+ .name = "inject_section",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_section_show,
+ .store = i7core_inject_section_store,
+ }, {
+ .attr = {
+ .name = "inject_type",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_type_show,
+ .store = i7core_inject_type_store,
+ }, {
+ .attr = {
+ .name = "inject_eccmask",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_eccmask_show,
+ .store = i7core_inject_eccmask_store,
+ }, {
+ .grp = &i7core_inject_addrmatch,
+ }, {
+ .attr = {
+ .name = "inject_enable",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_enable_show,
+ .store = i7core_inject_enable_store,
+ },
+ { .attr = { .name = NULL } }, /* Reserved for udimm counters */
+ { .attr = { .name = NULL } }
+};
+
+/****************************************************************************
+ Device initialization routines: put/get, init/exit
+ ****************************************************************************/
+
+/*
+ * i7core_put_devices 'put' all the devices that we have
+ * reserved via 'get'
+ */
+static void i7core_put_devices(struct i7core_dev *i7core_dev)
+{
+ int i;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+ for (i = 0; i < i7core_dev->n_devs; i++) {
+ struct pci_dev *pdev = i7core_dev->pdev[i];
+ if (!pdev)
+ continue;
+ debugf0("Removing dev %02x:%02x.%d\n",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ pci_dev_put(pdev);
+ }
+ kfree(i7core_dev->pdev);
+ list_del(&i7core_dev->list);
+ kfree(i7core_dev);
+}
+
+static void i7core_put_all_devices(void)
+{
+ struct i7core_dev *i7core_dev, *tmp;
+
+ list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list)
+ i7core_put_devices(i7core_dev);
+}
+
+static void __init i7core_xeon_pci_fixup(struct pci_id_table *table)
+{
+ struct pci_dev *pdev = NULL;
+ int i;
+ /*
+ * On Xeon 55xx, the Intel QuickPath Arch Generic Non-core PCI buses
+ * aren't announced by ACPI, so we need to use a legacy scan probe
+ * to detect them.
+ */
+ while (table && table->descr) {
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
+ if (unlikely(!pdev)) {
+ for (i = 0; i < MAX_SOCKET_BUSES; i++)
+ pcibios_scan_specific_bus(255-i);
+ }
+ table++;
+ }
+}
+
+/*
+ * i7core_get_devices Find and perform 'get' operation on the MCH's
+ * device/functions we want to reference for this driver
+ *
+ * Need to 'get' all the devices listed in the per-socket pci_id tables
+ */
+int i7core_get_onedevice(struct pci_dev **prev, int devno,
+ struct pci_id_descr *dev_descr, unsigned n_devs)
+{
+ struct i7core_dev *i7core_dev;
+
+ struct pci_dev *pdev = NULL;
+ u8 bus = 0;
+ u8 socket = 0;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ dev_descr->dev_id, *prev);
+
+ /*
+ * On Xeon 55xx, the Intel QuickPath Arch Generic Non-core regs
+ * is at addr 8086:2c40, instead of 8086:2c41. So, we need
+ * to probe for the alternate address in case of failure
+ */
+ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
+
+ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
+ *prev);
+
+ if (!pdev) {
+ if (*prev) {
+ *prev = pdev;
+ return 0;
+ }
+
+ if (dev_descr->optional)
+ return 0;
+
+ if (devno == 0)
+ return -ENODEV;
+
+ i7core_printk(KERN_ERR,
+ "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
+ dev_descr->dev, dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+
+ /* End of list, leave */
+ return -ENODEV;
+ }
+ bus = pdev->bus->number;
+
+ if (bus == 0x3f)
+ socket = 0;
+ else
+ socket = 255 - bus;
+
+ i7core_dev = get_i7core_dev(socket);
+ if (!i7core_dev) {
+ i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
+ if (!i7core_dev)
+ return -ENOMEM;
+ i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * n_devs,
+ GFP_KERNEL);
+ if (!i7core_dev->pdev) {
+ kfree(i7core_dev);
+ return -ENOMEM;
+ }
+ i7core_dev->socket = socket;
+ i7core_dev->n_devs = n_devs;
+ list_add_tail(&i7core_dev->list, &i7core_edac_list);
+ }
+
+ if (i7core_dev->pdev[devno]) {
+ i7core_printk(KERN_ERR,
+ "Duplicated device for "
+ "dev %02x:%02x.%d PCI ID %04x:%04x\n",
+ bus, dev_descr->dev, dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ pci_dev_put(pdev);
+ return -ENODEV;
+ }
+
+ i7core_dev->pdev[devno] = pdev;
+
+ /* Sanity check */
+ if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
+ PCI_FUNC(pdev->devfn) != dev_descr->func)) {
+ i7core_printk(KERN_ERR,
+ "Device PCI ID %04x:%04x "
+ "has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
+ bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+ bus, dev_descr->dev, dev_descr->func);
+ return -ENODEV;
+ }
+
+ /* Be sure that the device is enabled */
+ if (unlikely(pci_enable_device(pdev) < 0)) {
+ i7core_printk(KERN_ERR,
+ "Couldn't enable "
+ "dev %02x:%02x.%d PCI ID %04x:%04x\n",
+ bus, dev_descr->dev, dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ return -ENODEV;
+ }
+
+ debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
+ socket, bus, dev_descr->dev,
+ dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+
+ *prev = pdev;
+
+ return 0;
+}
+
+static int i7core_get_devices(struct pci_id_table *table)
+{
+ int i, rc;
+ struct pci_dev *pdev = NULL;
+ struct pci_id_descr *dev_descr;
+
+ while (table && table->descr) {
+ dev_descr = table->descr;
+ for (i = 0; i < table->n_devs; i++) {
+ pdev = NULL;
+ do {
+ rc = i7core_get_onedevice(&pdev, i, &dev_descr[i],
+ table->n_devs);
+ if (rc < 0) {
+ if (i == 0) {
+ i = table->n_devs;
+ break;
+ }
+ i7core_put_all_devices();
+ return -ENODEV;
+ }
+ } while (pdev);
+ }
+ table++;
+ }
+
+ return 0;
+}
+
+static int mci_bind_devs(struct mem_ctl_info *mci,
+ struct i7core_dev *i7core_dev)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ struct pci_dev *pdev;
+ int i, func, slot;
+
+ /* Associates i7core_dev and mci for future usage */
+ pvt->i7core_dev = i7core_dev;
+ i7core_dev->mci = mci;
+
+ pvt->is_registered = 0;
+ for (i = 0; i < i7core_dev->n_devs; i++) {
+ pdev = i7core_dev->pdev[i];
+ if (!pdev)
+ continue;
+
+ func = PCI_FUNC(pdev->devfn);
+ slot = PCI_SLOT(pdev->devfn);
+ if (slot == 3) {
+ if (unlikely(func > MAX_MCR_FUNC))
+ goto error;
+ pvt->pci_mcr[func] = pdev;
+ } else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
+ if (unlikely(func > MAX_CHAN_FUNC))
+ goto error;
+ pvt->pci_ch[slot - 4][func] = pdev;
+ } else if (!slot && !func)
+ pvt->pci_noncore = pdev;
+ else
+ goto error;
+
+ debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+ pdev, i7core_dev->socket);
+
+ if (PCI_SLOT(pdev->devfn) == 3 &&
+ PCI_FUNC(pdev->devfn) == 2)
+ pvt->is_registered = 1;
+ }
+
+ /*
+ * Add extra nodes to count errors on udimm
+ * For registered memory, this is not needed, since the counters
+ * are already displayed at the standard locations
+ */
+ if (!pvt->is_registered)
+ i7core_sysfs_attrs[ARRAY_SIZE(i7core_sysfs_attrs)-2].grp =
+ &i7core_udimm_counters;
+
+ return 0;
+
+error:
+ i7core_printk(KERN_ERR, "Device %d, function %d "
+ "is out of the expected range\n",
+ slot, func);
+ return -EINVAL;
+}
+
+/****************************************************************************
+ Error check routines
+ ****************************************************************************/
+static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
+ int chan, int dimm, int add)
+{
+ char *msg;
+ struct i7core_pvt *pvt = mci->pvt_info;
+ int row = pvt->csrow_map[chan][dimm], i;
+
+ for (i = 0; i < add; i++) {
+ msg = kasprintf(GFP_KERNEL, "Corrected error "
+ "(Socket=%d channel=%d dimm=%d)",
+ pvt->i7core_dev->socket, chan, dimm);
+
+ edac_mc_handle_fbd_ce(mci, row, 0, msg);
+ kfree (msg);
+ }
+}
+
+static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
+ int chan, int new0, int new1, int new2)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ int add0 = 0, add1 = 0, add2 = 0;
+ /* Updates CE counters if it is not the first time here */
+ if (pvt->ce_count_available) {
+ /* Updates CE counters */
+
+ add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
+ add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
+ add0 = new0 - pvt->rdimm_last_ce_count[chan][0];
+
+ if (add2 < 0)
+ add2 += 0x7fff;
+ pvt->rdimm_ce_count[chan][2] += add2;
+
+ if (add1 < 0)
+ add1 += 0x7fff;
+ pvt->rdimm_ce_count[chan][1] += add1;
+
+ if (add0 < 0)
+ add0 += 0x7fff;
+ pvt->rdimm_ce_count[chan][0] += add0;
+ } else
+ pvt->ce_count_available = 1;
+
+ /* Store the new values */
+ pvt->rdimm_last_ce_count[chan][2] = new2;
+ pvt->rdimm_last_ce_count[chan][1] = new1;
+ pvt->rdimm_last_ce_count[chan][0] = new0;
+
+ /* update the edac core */
+ if (add0 != 0)
+ i7core_rdimm_update_csrow(mci, chan, 0, add0);
+ if (add1 != 0)
+ i7core_rdimm_update_csrow(mci, chan, 1, add1);
+ if (add2 != 0)
+ i7core_rdimm_update_csrow(mci, chan, 2, add2);
+
+}
+
+static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ u32 rcv[3][2];
+ int i, new0, new1, new2;
+
+ /*Read DEV 3: FUN 2: MC_COR_ECC_CNT regs directly*/
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
+ &rcv[0][0]);
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
+ &rcv[0][1]);
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
+ &rcv[1][0]);
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
+ &rcv[1][1]);
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
+ &rcv[2][0]);
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
+ &rcv[2][1]);
+ for (i = 0 ; i < 3; i++) {
+ debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
+ (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
+ /*if the channel has 3 dimms*/
+ if (pvt->channel[i].dimms > 2) {
+ new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
+ new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
+ new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
+ } else {
+ new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
+ DIMM_BOT_COR_ERR(rcv[i][0]);
+ new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
+ DIMM_BOT_COR_ERR(rcv[i][1]);
+ new2 = 0;
+ }
+
+ i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
+ }
+}
+
+/* This function is based on the device 3 function 4 registers as described on:
+ * Intel Xeon Processor 5500 Series Datasheet Volume 2
+ * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
+ * also available at:
+ * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
+ */
+static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ u32 rcv1, rcv0;
+ int new0, new1, new2;
+
+ if (!pvt->pci_mcr[4]) {
+ debugf0("%s MCR registers not found\n", __func__);
+ return;
+ }
+
+ /* Corrected test errors */
+ pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
+ pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);
+
+ /* Store the new values */
+ new2 = DIMM2_COR_ERR(rcv1);
+ new1 = DIMM1_COR_ERR(rcv0);
+ new0 = DIMM0_COR_ERR(rcv0);
+
+ /* Updates CE counters if it is not the first time here */
+ if (pvt->ce_count_available) {
+ /* Updates CE counters */
+ int add0, add1, add2;
+
+ add2 = new2 - pvt->udimm_last_ce_count[2];
+ add1 = new1 - pvt->udimm_last_ce_count[1];
+ add0 = new0 - pvt->udimm_last_ce_count[0];
+
+ if (add2 < 0)
+ add2 += 0x7fff;
+ pvt->udimm_ce_count[2] += add2;
+
+ if (add1 < 0)
+ add1 += 0x7fff;
+ pvt->udimm_ce_count[1] += add1;
+
+ if (add0 < 0)
+ add0 += 0x7fff;
+ pvt->udimm_ce_count[0] += add0;
+
+ if (add0 | add1 | add2)
+ i7core_printk(KERN_ERR, "New Corrected error(s): "
+ "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
+ add0, add1, add2);
+ } else
+ pvt->ce_count_available = 1;
+
+ /* Store the new values */
+ pvt->udimm_last_ce_count[2] = new2;
+ pvt->udimm_last_ce_count[1] = new1;
+ pvt->udimm_last_ce_count[0] = new0;
+}
+
+/*
+ * According to tables E-11 and E-12 in chapter E.3.3 of the Intel 64 and IA-32
+ * Architectures Software Developer's Manual Volume 3B,
+ * Nehalem is defined as family 0x06, model 0x1a.
+ *
+ * The MCA registers used here are the following ones:
+ * struct mce field MCA Register
+ * m->status MSR_IA32_MC8_STATUS
+ * m->addr MSR_IA32_MC8_ADDR
+ * m->misc MSR_IA32_MC8_MISC
+ * In the case of Nehalem, the error information is carried in the .status and
+ * .misc fields.
+ */
+static void i7core_mce_output_error(struct mem_ctl_info *mci,
+ struct mce *m)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ char *type, *optype, *err, *msg;
+ unsigned long error = m->status & 0x1ff0000l;
+ u32 optypenum = (m->status >> 4) & 0x07;
+ u32 core_err_cnt = (m->status >> 38) & 0x7fff;
+ u32 dimm = (m->misc >> 16) & 0x3;
+ u32 channel = (m->misc >> 18) & 0x3;
+ u32 syndrome = m->misc >> 32;
+ u32 errnum = find_first_bit(&error, 32);
+ int csrow;
+
+ if (m->mcgstatus & 1)
+ type = "FATAL";
+ else
+ type = "NON_FATAL";
+
+ switch (optypenum) {
+ case 0:
+ optype = "generic undef request";
+ break;
+ case 1:
+ optype = "read error";
+ break;
+ case 2:
+ optype = "write error";
+ break;
+ case 3:
+ optype = "addr/cmd error";
+ break;
+ case 4:
+ optype = "scrubbing error";
+ break;
+ default:
+ optype = "reserved";
+ break;
+ }
+
+ switch (errnum) {
+ case 16:
+ err = "read ECC error";
+ break;
+ case 17:
+ err = "RAS ECC error";
+ break;
+ case 18:
+ err = "write parity error";
+ break;
+ case 19:
+ err = "redundacy loss";
+ break;
+ case 20:
+ err = "reserved";
+ break;
+ case 21:
+ err = "memory range error";
+ break;
+ case 22:
+ err = "RTID out of range";
+ break;
+ case 23:
+ err = "address parity error";
+ break;
+ case 24:
+ err = "byte enable parity error";
+ break;
+ default:
+ err = "unknown";
+ }
+
+ /* FIXME: should convert addr into bank and rank information */
+ msg = kasprintf(GFP_ATOMIC,
+ "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
+ "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
+ type, (long long) m->addr, m->cpu, dimm, channel,
+ syndrome, core_err_cnt, (long long)m->status,
+ (long long)m->misc, optype, err);
+
+ debugf0("%s", msg);
+
+ csrow = pvt->csrow_map[channel][dimm];
+
+ /* Call the helper to output message */
+ if (m->mcgstatus & 1)
+ edac_mc_handle_fbd_ue(mci, csrow, 0,
+ 0 /* FIXME: should be channel here */, msg);
+ else if (!pvt->is_registered)
+ edac_mc_handle_fbd_ce(mci, csrow,
+ 0 /* FIXME: should be channel here */, msg);
+
+ kfree(msg);
+}
+
+/*
+ * i7core_check_error Retrieve and process errors reported by the
+ * hardware. Called by the Core module.
+ */
+static void i7core_check_error(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ int i;
+ unsigned count = 0;
+ struct mce *m;
+
+ /*
+ * MCE first step: Copy all mce errors into a temporary buffer
+ * We use double buffering here, to reduce the risk of
+ * losing an error.
+ */
+ smp_rmb();
+ count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
+ % MCE_LOG_LEN;
+ if (!count)
+ goto check_ce_error;
+
+ m = pvt->mce_outentry;
+ if (pvt->mce_in + count > MCE_LOG_LEN) {
+ unsigned l = MCE_LOG_LEN - pvt->mce_in;
+
+ memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
+ smp_wmb();
+ pvt->mce_in = 0;
+ count -= l;
+ m += l;
+ }
+ memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
+ smp_wmb();
+ pvt->mce_in += count;
+
+ smp_rmb();
+ if (pvt->mce_overrun) {
+ i7core_printk(KERN_ERR, "Lost %d memory errors\n",
+ pvt->mce_overrun);
+ smp_wmb();
+ pvt->mce_overrun = 0;
+ }
+
+ /*
+ * MCE second step: parse errors and display
+ */
+ for (i = 0; i < count; i++)
+ i7core_mce_output_error(mci, &pvt->mce_outentry[i]);
+
+ /*
+ * Now, let's increment CE error counts
+ */
+check_ce_error:
+ if (!pvt->is_registered)
+ i7core_udimm_check_mc_ecc_err(mci);
+ else
+ i7core_rdimm_check_mc_ecc_err(mci);
+}
+
+/*
+ * i7core_mce_check_error Replicates mcelog routine to get errors
+ * This routine simply queues mcelog errors, and
+ * returns. The error itself should be handled later
+ * by i7core_check_error.
+ * WARNING: As this routine should be called at NMI time, extra care should
+ * be taken to avoid deadlocks, and to be as fast as possible.
+ */
+static int i7core_mce_check_error(void *priv, struct mce *mce)
+{
+ struct mem_ctl_info *mci = priv;
+ struct i7core_pvt *pvt = mci->pvt_info;
+
+ /*
+ * Just let mcelog handle it if the error is
+ * outside the memory controller
+ */
+ if (((mce->status & 0xffff) >> 7) != 1)
+ return 0;
+
+ /* Bank 8 registers are the only ones that we know how to handle */
+ if (mce->bank != 8)
+ return 0;
+
+#ifdef CONFIG_SMP
+ /* Only handle if it is the right mc controller */
+ if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket)
+ return 0;
+#endif
+
+ smp_rmb();
+ if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
+ smp_wmb();
+ pvt->mce_overrun++;
+ return 0;
+ }
+
+ /* Copy the memory error into the ring buffer */
+ memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
+ smp_wmb();
+ pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
+
+ /* Handle fatal errors immediately */
+ if (mce->mcgstatus & 1)
+ i7core_check_error(mci);
+
+ /* Advise mcelog that the error was handled */
+ return 1;
+}
+
+static int i7core_register_mci(struct i7core_dev *i7core_dev,
+ int num_channels, int num_csrows)
+{
+ struct mem_ctl_info *mci;
+ struct i7core_pvt *pvt;
+ int csrow = 0;
+ int rc;
+
+ /* allocate a new MC control structure */
+ mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels,
+ i7core_dev->socket);
+ if (unlikely(!mci))
+ return -ENOMEM;
+
+ debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+
+ /* record ptr to the generic device */
+ mci->dev = &i7core_dev->pdev[0]->dev;
+
+ pvt = mci->pvt_info;
+ memset(pvt, 0, sizeof(*pvt));
+
+ /*
+ * FIXME: how to handle RDDR3 at MCI level? It is possible to have
+ * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
+ * memory channels
+ */
+ mci->mtype_cap = MEM_FLAG_DDR3;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+ mci->mod_name = "i7core_edac.c";
+ mci->mod_ver = I7CORE_REVISION;
+ mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
+ i7core_dev->socket);
+ mci->dev_name = pci_name(i7core_dev->pdev[0]);
+ mci->ctl_page_to_phys = NULL;
+ mci->mc_driver_sysfs_attributes = i7core_sysfs_attrs;
+ /* Set the function pointer to an actual operation function */
+ mci->edac_check = i7core_check_error;
+
+ /* Store pci devices at mci for faster access */
+ rc = mci_bind_devs(mci, i7core_dev);
+ if (unlikely(rc < 0))
+ goto fail;
+
+ /* Get dimm basic config */
+ get_dimm_config(mci, &csrow);
+
+ /* add this new MC control structure to EDAC's list of MCs */
+ if (unlikely(edac_mc_add_mc(mci))) {
+ debugf0("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n", __func__);
+ /* FIXME: perhaps some code should go here that disables error
+ * reporting if we just enabled it
+ */
+
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ /* allocating generic PCI control info */
+ i7core_pci = edac_pci_create_generic_ctl(&i7core_dev->pdev[0]->dev,
+ EDAC_MOD_STR);
+ if (unlikely(!i7core_pci)) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
+ /* Default error mask is any memory */
+ pvt->inject.channel = 0;
+ pvt->inject.dimm = -1;
+ pvt->inject.rank = -1;
+ pvt->inject.bank = -1;
+ pvt->inject.page = -1;
+ pvt->inject.col = -1;
+
+ /* Register with edac_mce in order to receive memory errors */
+ pvt->edac_mce.priv = mci;
+ pvt->edac_mce.check_error = i7core_mce_check_error;
+
+ rc = edac_mce_register(&pvt->edac_mce);
+ if (unlikely(rc < 0)) {
+ debugf0("MC: " __FILE__
+ ": %s(): failed edac_mce_register()\n", __func__);
+ }
+
+fail:
+ if (rc < 0)
+ edac_mc_free(mci);
+ return rc;
+}
+
+/*
+ * i7core_probe Probe for ONE instance of device to see if it is
+ * present.
+ * return:
+ * 0 if a device was found
+ * < 0 on error
+ */
+static int __devinit i7core_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int dev_idx = id->driver_data;
+ int rc;
+ struct i7core_dev *i7core_dev;
+
+ /*
+ * All memory controllers are allocated at the first pass.
+ */
+ if (unlikely(dev_idx >= 1))
+ return -EINVAL;
+
+ /* get the pci devices we want to reserve for our use */
+ mutex_lock(&i7core_edac_lock);
+
+ rc = i7core_get_devices(pci_dev_table);
+ if (unlikely(rc < 0))
+ goto fail0;
+
+ list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
+ int channels;
+ int csrows;
+
+ /* Check the number of active and not disabled channels */
+ rc = i7core_get_active_channels(i7core_dev->socket,
+ &channels, &csrows);
+ if (unlikely(rc < 0))
+ goto fail1;
+
+ rc = i7core_register_mci(i7core_dev, channels, csrows);
+ if (unlikely(rc < 0))
+ goto fail1;
+ }
+
+ i7core_printk(KERN_INFO, "Driver loaded.\n");
+
+ mutex_unlock(&i7core_edac_lock);
+ return 0;
+
+fail1:
+ i7core_put_all_devices();
+fail0:
+ mutex_unlock(&i7core_edac_lock);
+ return rc;
+}
+
+/*
+ * i7core_remove destructor for one instance of device
+ *
+ */
+static void __devexit i7core_remove(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct i7core_dev *i7core_dev, *tmp;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ if (i7core_pci)
+ edac_pci_release_generic_ctl(i7core_pci);
+
+ /*
+ * There is a problem here: the pdev value for removal will be wrong, since
+ * it will point to the X58 register used to detect that the machine
+ * is a Nehalem or newer design. However, due to the way several PCI
+ * devices are grouped together to provide MC functionality, we need
+ * to use a different method for releasing the devices
+ */
+
+ mutex_lock(&i7core_edac_lock);
+ list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
+ mci = edac_mc_del_mc(&i7core_dev->pdev[0]->dev);
+ if (mci) {
+ struct i7core_pvt *pvt = mci->pvt_info;
+
+ i7core_dev = pvt->i7core_dev;
+ edac_mce_unregister(&pvt->edac_mce);
+ kfree(mci->ctl_name);
+ edac_mc_free(mci);
+ i7core_put_devices(i7core_dev);
+ } else {
+ i7core_printk(KERN_ERR,
+ "Couldn't find mci for socket %d\n",
+ i7core_dev->socket);
+ }
+ }
+ mutex_unlock(&i7core_edac_lock);
+}
+
+MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);
+
+/*
+ * i7core_driver pci_driver structure for this module
+ *
+ */
+static struct pci_driver i7core_driver = {
+ .name = "i7core_edac",
+ .probe = i7core_probe,
+ .remove = __devexit_p(i7core_remove),
+ .id_table = i7core_pci_tbl,
+};
+
+/*
+ * i7core_init Module entry function
+ * Try to initialize this module for its devices
+ */
+static int __init i7core_init(void)
+{
+ int pci_rc;
+
+ debugf2("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ i7core_xeon_pci_fixup(pci_dev_table);
+
+ pci_rc = pci_register_driver(&i7core_driver);
+
+ if (pci_rc >= 0)
+ return 0;
+
+ i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
+ pci_rc);
+
+ return pci_rc;
+}
+
+/*
+ * i7core_exit() Module exit function
+ * Unregister the driver
+ */
+static void __exit i7core_exit(void)
+{
+ debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ pci_unregister_driver(&i7core_driver);
+}
+
+module_init(i7core_init);
+module_exit(i7core_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
+ I7CORE_REVISION);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
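The mce_in/mce_out handshake above is a plain single-producer/single-consumer ring buffer: i7core_mce_check_error() appends at mce_out from NMI context, and i7core_check_error() later drains from mce_in. The standalone sketch below (plain C, hypothetical names, memory barriers and the overrun counter omitted) shows the same index arithmetic, including the two-part copy used when the read position wraps.

#include <stdio.h>
#include <string.h>

#define LOG_LEN 32                      /* ring size, mirrors MCE_LOG_LEN */

struct ring {
	int entry[LOG_LEN];             /* stands in for struct mce slots */
	unsigned in;                    /* next slot the consumer will read */
	unsigned out;                   /* next slot the producer will write */
};

/* Producer side: returns 0 and drops the record when the ring is full. */
static int ring_push(struct ring *r, int rec)
{
	if ((r->out + 1) % LOG_LEN == r->in)
		return 0;               /* would overrun; caller counts it */
	r->entry[r->out] = rec;
	r->out = (r->out + 1) % LOG_LEN;
	return 1;
}

/* Consumer side: copy everything pending into 'dst', handling wrap-around
 * with two memcpy() calls the same way i7core_check_error() does. */
static unsigned ring_drain(struct ring *r, int *dst)
{
	unsigned count = (r->out + LOG_LEN - r->in) % LOG_LEN;
	unsigned n = count;

	if (r->in + n > LOG_LEN) {
		unsigned l = LOG_LEN - r->in;

		memcpy(dst, &r->entry[r->in], sizeof(*dst) * l);
		r->in = 0;
		n -= l;
		dst += l;
	}
	memcpy(dst, &r->entry[r->in], sizeof(*dst) * n);
	r->in = (r->in + n) % LOG_LEN;
	return count;
}

int main(void)
{
	struct ring r = { .in = 30, .out = 30 };    /* start near the end */
	int out[LOG_LEN];
	int i;

	for (i = 0; i < 5; i++)
		ring_push(&r, i);                   /* wraps past slot 31 */
	printf("drained %u records\n", ring_drain(&r, out));
	return 0;
}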
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 7f3884fcbd46..a2fa1feed724 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -178,7 +178,7 @@ static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
{
struct i82443bxgx_edacmc_error_info info;
- debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
i82443bxgx_edacmc_get_error_info(mci, &info);
i82443bxgx_edacmc_process_error_info(mci, &info, 1);
}
@@ -198,13 +198,13 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
- debugf1("MC%d: " __FILE__ ": %s() Row=%d DRB = %#0x\n",
- mci->mc_idx, __func__, index, drbar);
+ debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
+ mci->mc_idx, __FILE__, __func__, index, drbar);
row_high_limit = ((u32) drbar << 23);
/* find the DRAM Chip Select Base address and mask */
- debugf1("MC%d: " __FILE__ ": %s() Row=%d, "
- "Boundry Address=%#0x, Last = %#0x \n",
- mci->mc_idx, __func__, index, row_high_limit,
+ debugf1("MC%d: %s: %s() Row=%d, "
+ "Boundry Address=%#0x, Last = %#0x\n",
+ mci->mc_idx, __FILE__, __func__, index, row_high_limit,
row_high_limit_last);
/* 440GX goes to 2GB, represented with a DRB of 0. */
@@ -237,7 +237,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
enum mem_type mtype;
enum edac_type edac_mode;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("MC: %s: %s()\n", __FILE__, __func__);
/* Something is really hosed if PCI config space reads from
* the MC aren't working.
@@ -250,7 +250,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
if (mci == NULL)
return -ENOMEM;
- debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+ debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
mci->dev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
@@ -336,7 +336,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
__func__);
}
- debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ debugf3("MC: %s: %s(): success\n", __FILE__, __func__);
return 0;
fail:
@@ -352,9 +352,9 @@ static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("MC: %s: %s()\n", __FILE__, __func__);
- /* don't need to call pci_device_enable() */
+ /* don't need to call pci_enable_device() */
rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
if (mci_pdev == NULL)
@@ -367,7 +367,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0(__FILE__ ": %s()\n", __func__);
+ debugf0("%s: %s()\n", __FILE__, __func__);
if (i82443bxgx_pci)
edac_pci_release_generic_ctl(i82443bxgx_pci);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 4471647b4807..52ca09bf4726 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -229,7 +229,7 @@ static int __devinit mpc85xx_pci_err_probe(struct of_device *op,
pdata->edac_idx = edac_pci_idx++;
- res = of_address_to_resource(op->node, 0, &r);
+ res = of_address_to_resource(op->dev.of_node, 0, &r);
if (res) {
printk(KERN_ERR "%s: Unable to get resource for "
"PCI err regs\n", __func__);
@@ -274,7 +274,7 @@ static int __devinit mpc85xx_pci_err_probe(struct of_device *op,
}
if (edac_op_state == EDAC_OPSTATE_INT) {
- pdata->irq = irq_of_parse_and_map(op->node, 0);
+ pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
res = devm_request_irq(&op->dev, pdata->irq,
mpc85xx_pci_isr, IRQF_DISABLED,
"[EDAC] PCI err", pci);
@@ -338,15 +338,13 @@ static struct of_device_id mpc85xx_pci_err_of_match[] = {
};
static struct of_platform_driver mpc85xx_pci_err_driver = {
- .owner = THIS_MODULE,
- .name = "mpc85xx_pci_err",
- .match_table = mpc85xx_pci_err_of_match,
.probe = mpc85xx_pci_err_probe,
.remove = __devexit_p(mpc85xx_pci_err_remove),
.driver = {
- .name = "mpc85xx_pci_err",
- .owner = THIS_MODULE,
- },
+ .name = "mpc85xx_pci_err",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc85xx_pci_err_of_match,
+ },
};
#endif /* CONFIG_PCI */
@@ -531,7 +529,7 @@ static int __devinit mpc85xx_l2_err_probe(struct of_device *op,
edac_dev->ctl_name = pdata->name;
edac_dev->dev_name = pdata->name;
- res = of_address_to_resource(op->node, 0, &r);
+ res = of_address_to_resource(op->dev.of_node, 0, &r);
if (res) {
printk(KERN_ERR "%s: Unable to get resource for "
"L2 err regs\n", __func__);
@@ -578,7 +576,7 @@ static int __devinit mpc85xx_l2_err_probe(struct of_device *op,
}
if (edac_op_state == EDAC_OPSTATE_INT) {
- pdata->irq = irq_of_parse_and_map(op->node, 0);
+ pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
res = devm_request_irq(&op->dev, pdata->irq,
mpc85xx_l2_isr, IRQF_DISABLED,
"[EDAC] L2 err", edac_dev);
@@ -654,15 +652,13 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = {
};
static struct of_platform_driver mpc85xx_l2_err_driver = {
- .owner = THIS_MODULE,
- .name = "mpc85xx_l2_err",
- .match_table = mpc85xx_l2_err_of_match,
.probe = mpc85xx_l2_err_probe,
.remove = mpc85xx_l2_err_remove,
.driver = {
- .name = "mpc85xx_l2_err",
- .owner = THIS_MODULE,
- },
+ .name = "mpc85xx_l2_err",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc85xx_l2_err_of_match,
+ },
};
/**************************** MC Err device ***************************/
@@ -982,7 +978,7 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
mci->ctl_name = pdata->name;
mci->dev_name = pdata->name;
- res = of_address_to_resource(op->node, 0, &r);
+ res = of_address_to_resource(op->dev.of_node, 0, &r);
if (res) {
printk(KERN_ERR "%s: Unable to get resource for MC err regs\n",
__func__);
@@ -1056,7 +1052,7 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000);
/* register interrupts */
- pdata->irq = irq_of_parse_and_map(op->node, 0);
+ pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
res = devm_request_irq(&op->dev, pdata->irq,
mpc85xx_mc_isr,
IRQF_DISABLED | IRQF_SHARED,
@@ -1131,15 +1127,13 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
};
static struct of_platform_driver mpc85xx_mc_err_driver = {
- .owner = THIS_MODULE,
- .name = "mpc85xx_mc_err",
- .match_table = mpc85xx_mc_err_of_match,
.probe = mpc85xx_mc_err_probe,
.remove = mpc85xx_mc_err_remove,
.driver = {
- .name = "mpc85xx_mc_err",
- .owner = THIS_MODULE,
- },
+ .name = "mpc85xx_mc_err",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc85xx_mc_err_of_match,
+ },
};
#ifdef CONFIG_MPC85xx
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 11f2172aa1e6..e78839e89a06 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -202,13 +202,13 @@ static struct of_device_id ppc4xx_edac_match[] = {
};
static struct of_platform_driver ppc4xx_edac_driver = {
- .match_table = ppc4xx_edac_match,
.probe = ppc4xx_edac_probe,
.remove = ppc4xx_edac_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = PPC4XX_EDAC_MODULE_NAME
- }
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = PPC4XX_EDAC_MODULE_NAME,
+ .of_match_table = ppc4xx_edac_match,
+ },
};
/*
@@ -1022,7 +1022,7 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
int status = 0;
const u32 memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
struct ppc4xx_edac_pdata *pdata = NULL;
- const struct device_node *np = op->node;
+ const struct device_node *np = op->dev.of_node;
if (match == NULL)
return -EINVAL;
@@ -1113,7 +1113,7 @@ ppc4xx_edac_register_irq(struct of_device *op, struct mem_ctl_info *mci)
int status = 0;
int ded_irq, sec_irq;
struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
- struct device_node *np = op->node;
+ struct device_node *np = op->dev.of_node;
ded_irq = irq_of_parse_and_map(np, INTMAP_ECCDED_INDEX);
sec_irq = irq_of_parse_and_map(np, INTMAP_ECCSEC_INDEX);
@@ -1243,7 +1243,7 @@ ppc4xx_edac_probe(struct of_device *op, const struct of_device_id *match)
int status = 0;
u32 mcopt1, memcheck;
dcr_host_t dcr_host;
- const struct device_node *np = op->node;
+ const struct device_node *np = op->dev.of_node;
struct mem_ctl_info *mci = NULL;
static int ppc4xx_edac_instance;
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index d55f8e9de788..6a822c631ef5 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -354,7 +354,7 @@ static int __devinit r82600_init_one(struct pci_dev *pdev,
{
debugf0("%s()\n", __func__);
- /* don't need to call pci_device_enable() */
+ /* don't need to call pci_enable_device() */
return r82600_probe1(pdev, ent->driver_data);
}
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 5045156c5313..371713ff0266 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -30,7 +30,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
-#include <linux/timer.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
@@ -63,7 +62,7 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
#define BIB_CRC(v) ((v) << 0)
#define BIB_CRC_LENGTH(v) ((v) << 16)
#define BIB_INFO_LENGTH(v) ((v) << 24)
-
+#define BIB_BUS_NAME 0x31333934 /* "1394" */
#define BIB_LINK_SPEED(v) ((v) << 0)
#define BIB_GENERATION(v) ((v) << 4)
#define BIB_MAX_ROM(v) ((v) << 8)
@@ -73,7 +72,8 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
#define BIB_BMC ((1) << 28)
#define BIB_ISC ((1) << 29)
#define BIB_CMC ((1) << 30)
-#define BIB_IMC ((1) << 31)
+#define BIB_IRMC ((1) << 31)
+#define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
@@ -91,18 +91,18 @@ static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
config_rom[0] = cpu_to_be32(
BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
- config_rom[1] = cpu_to_be32(0x31333934);
+ config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
config_rom[2] = cpu_to_be32(
BIB_LINK_SPEED(card->link_speed) |
BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
BIB_MAX_ROM(2) |
BIB_MAX_RECEIVE(card->max_receive) |
- BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC);
+ BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
config_rom[3] = cpu_to_be32(card->guid >> 32);
config_rom[4] = cpu_to_be32(card->guid);
/* Generate root directory. */
- config_rom[6] = cpu_to_be32(0x0c0083c0); /* node capabilities */
+ config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
i = 7;
j = 7 + descriptor_count;
@@ -231,7 +231,7 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
static void fw_card_bm_work(struct work_struct *work)
{
struct fw_card *card = container_of(work, struct fw_card, work.work);
- struct fw_device *root_device;
+ struct fw_device *root_device, *irm_device;
struct fw_node *root_node;
unsigned long flags;
int root_id, new_root_id, irm_id, local_id;
@@ -239,6 +239,7 @@ static void fw_card_bm_work(struct work_struct *work)
bool do_reset = false;
bool root_device_is_running;
bool root_device_is_cmc;
+ bool irm_is_1394_1995_only;
spin_lock_irqsave(&card->lock, flags);
@@ -248,12 +249,18 @@ static void fw_card_bm_work(struct work_struct *work)
}
generation = card->generation;
+
root_node = card->root_node;
fw_node_get(root_node);
root_device = root_node->data;
root_device_is_running = root_device &&
atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
root_device_is_cmc = root_device && root_device->cmc;
+
+ irm_device = card->irm_node->data;
+ irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
+ (irm_device->config_rom[2] & 0x000000f0) == 0;
+
root_id = root_node->node_id;
irm_id = card->irm_node->node_id;
local_id = card->local_node->node_id;
@@ -276,8 +283,15 @@ static void fw_card_bm_work(struct work_struct *work)
if (!card->irm_node->link_on) {
new_root_id = local_id;
- fw_notify("IRM has link off, making local node (%02x) root.\n",
- new_root_id);
+ fw_notify("%s, making local node (%02x) root.\n",
+ "IRM has link off", new_root_id);
+ goto pick_me;
+ }
+
+ if (irm_is_1394_1995_only) {
+ new_root_id = local_id;
+ fw_notify("%s, making local node (%02x) root.\n",
+ "IRM is not 1394a compliant", new_root_id);
goto pick_me;
}
@@ -316,8 +330,8 @@ static void fw_card_bm_work(struct work_struct *work)
* root, and thus, IRM.
*/
new_root_id = local_id;
- fw_notify("BM lock failed, making local node (%02x) root.\n",
- new_root_id);
+ fw_notify("%s, making local node (%02x) root.\n",
+ "BM lock failed", new_root_id);
goto pick_me;
}
} else if (card->bm_generation != generation) {
@@ -407,13 +421,6 @@ static void fw_card_bm_work(struct work_struct *work)
fw_card_put(card);
}
-static void flush_timer_callback(unsigned long data)
-{
- struct fw_card *card = (struct fw_card *)data;
-
- fw_flush_transactions(card);
-}
-
void fw_card_initialize(struct fw_card *card,
const struct fw_card_driver *driver,
struct device *device)
@@ -432,8 +439,6 @@ void fw_card_initialize(struct fw_card *card,
init_completion(&card->done);
INIT_LIST_HEAD(&card->transaction_list);
spin_lock_init(&card->lock);
- setup_timer(&card->flush_timer,
- flush_timer_callback, (unsigned long)card);
card->local_node = NULL;
@@ -558,7 +563,6 @@ void fw_core_remove_card(struct fw_card *card)
wait_for_completion(&card->done);
WARN_ON(!list_empty(&card->transaction_list));
- del_timer_sync(&card->flush_timer);
}
EXPORT_SYMBOL(fw_core_remove_card);
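The new irm_is_1394_1995_only test above keys off the generation field (bits 4..7 of the third bus_info_block quadlet): an IRM reporting generation 0 does not claim 1394a compliance, so fw_card_bm_work() makes the local node root instead of deferring to it. A minimal sketch of that classification, using made-up ROM contents:

#include <stdint.h>
#include <stdio.h>

/* Classify an IRM from quadlet 2 of its bus_info_block, as fw_card_bm_work()
 * now does: a generation field (bits 4..7) of zero means the node does not
 * claim 1394a compliance.  ROM contents below are made up. */
static int is_1394_1995_only(const uint32_t *config_rom)
{
	return (config_rom[2] & 0x000000f0) == 0;
}

int main(void)
{
	uint32_t old_irm[3] = { 0, 0x31333934, 0x00000000 };  /* generation 0 */
	uint32_t new_irm[3] = { 0, 0x31333934, 0x00000020 };  /* generation 2 */

	printf("%d %d\n", is_1394_1995_only(old_irm), is_1394_1995_only(new_irm));
	return 0;
}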
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 14a34d99eea2..5bf106b9d791 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -227,7 +227,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
list_add_tail(&client->link, &device->client_list);
mutex_unlock(&device->client_list_mutex);
- return 0;
+ return nonseekable_open(inode, file);
}
static void queue_event(struct client *client, struct event *event,
@@ -1496,13 +1496,13 @@ static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
const struct file_operations fw_device_ops = {
.owner = THIS_MODULE,
+ .llseek = no_llseek,
.open = fw_device_op_open,
.read = fw_device_op_read,
.unlocked_ioctl = fw_device_op_ioctl,
- .poll = fw_device_op_poll,
- .release = fw_device_op_release,
.mmap = fw_device_op_mmap,
-
+ .release = fw_device_op_release,
+ .poll = fw_device_op_poll,
#ifdef CONFIG_COMPAT
.compat_ioctl = fw_device_op_compat_ioctl,
#endif
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 673b03f8b4ec..fdc33ff06dc1 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -81,7 +81,7 @@ static int close_transaction(struct fw_transaction *transaction,
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t == transaction) {
- list_del(&t->link);
+ list_del_init(&t->link);
card->tlabel_mask &= ~(1ULL << t->tlabel);
break;
}
@@ -89,6 +89,7 @@ static int close_transaction(struct fw_transaction *transaction,
spin_unlock_irqrestore(&card->lock, flags);
if (&t->link != &card->transaction_list) {
+ del_timer_sync(&t->split_timeout_timer);
t->callback(card, rcode, NULL, 0, t->callback_data);
return 0;
}
@@ -121,6 +122,31 @@ int fw_cancel_transaction(struct fw_card *card,
}
EXPORT_SYMBOL(fw_cancel_transaction);
+static void split_transaction_timeout_callback(unsigned long data)
+{
+ struct fw_transaction *t = (struct fw_transaction *)data;
+ struct fw_card *card = t->card;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ if (list_empty(&t->link)) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ return;
+ }
+ list_del(&t->link);
+ card->tlabel_mask &= ~(1ULL << t->tlabel);
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ card->driver->cancel_packet(card, &t->packet);
+
+ /*
+ * At this point cancel_packet will never call the transaction
+ * callback, since we just took the transaction out of the list.
+ * So do it here.
+ */
+ t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
+}
+
static void transmit_complete_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
@@ -229,6 +255,23 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
packet->payload_mapped = false;
}
+static int allocate_tlabel(struct fw_card *card)
+{
+ int tlabel;
+
+ tlabel = card->current_tlabel;
+ while (card->tlabel_mask & (1ULL << tlabel)) {
+ tlabel = (tlabel + 1) & 0x3f;
+ if (tlabel == card->current_tlabel)
+ return -EBUSY;
+ }
+
+ card->current_tlabel = (tlabel + 1) & 0x3f;
+ card->tlabel_mask |= 1ULL << tlabel;
+
+ return tlabel;
+}
+
/**
* This function provides low-level access to the IEEE1394 transaction
* logic. Most C programs would use either fw_read(), fw_write() or
@@ -277,31 +320,26 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
int tlabel;
/*
- * Bump the flush timer up 100ms first of all so we
- * don't race with a flush timer callback.
- */
-
- mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));
-
- /*
* Allocate tlabel from the bitmap and put the transaction on
* the list while holding the card spinlock.
*/
spin_lock_irqsave(&card->lock, flags);
- tlabel = card->current_tlabel;
- if (card->tlabel_mask & (1ULL << tlabel)) {
+ tlabel = allocate_tlabel(card);
+ if (tlabel < 0) {
spin_unlock_irqrestore(&card->lock, flags);
callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
return;
}
- card->current_tlabel = (card->current_tlabel + 1) & 0x3f;
- card->tlabel_mask |= (1ULL << tlabel);
-
t->node_id = destination_id;
t->tlabel = tlabel;
+ t->card = card;
+ setup_timer(&t->split_timeout_timer,
+ split_transaction_timeout_callback, (unsigned long)t);
+ /* FIXME: start this timer later, relative to t->timestamp */
+ mod_timer(&t->split_timeout_timer, jiffies + DIV_ROUND_UP(HZ, 10));
t->callback = callback;
t->callback_data = callback_data;
@@ -347,11 +385,13 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
struct transaction_callback_data d;
struct fw_transaction t;
+ init_timer_on_stack(&t.split_timeout_timer);
init_completion(&d.done);
d.payload = payload;
fw_send_request(card, &t, tcode, destination_id, generation, speed,
offset, payload, length, transaction_callback, &d);
wait_for_completion(&d.done);
+ destroy_timer_on_stack(&t.split_timeout_timer);
return d.rcode;
}
@@ -394,30 +434,6 @@ void fw_send_phy_config(struct fw_card *card,
mutex_unlock(&phy_config_mutex);
}
-void fw_flush_transactions(struct fw_card *card)
-{
- struct fw_transaction *t, *next;
- struct list_head list;
- unsigned long flags;
-
- INIT_LIST_HEAD(&list);
- spin_lock_irqsave(&card->lock, flags);
- list_splice_init(&card->transaction_list, &list);
- card->tlabel_mask = 0;
- spin_unlock_irqrestore(&card->lock, flags);
-
- list_for_each_entry_safe(t, next, &list, link) {
- card->driver->cancel_packet(card, &t->packet);
-
- /*
- * At this point cancel_packet will never call the
- * transaction callback, since we just took all the
- * transactions out of the list. So do it here.
- */
- t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
- }
-}
-
static struct fw_address_handler *lookup_overlapping_address_handler(
struct list_head *list, unsigned long long offset, size_t length)
{
@@ -827,8 +843,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t->node_id == source && t->tlabel == tlabel) {
- list_del(&t->link);
- card->tlabel_mask &= ~(1 << t->tlabel);
+ list_del_init(&t->link);
+ card->tlabel_mask &= ~(1ULL << t->tlabel);
break;
}
}
@@ -869,6 +885,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
break;
}
+ del_timer_sync(&t->split_timeout_timer);
+
/*
* The response handler may be executed while the request handler
* is still pending. Cancel the request handler.
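allocate_tlabel() above rotates through the 64 possible 6-bit transaction labels, skipping those still marked busy in the 64-bit mask and resuming from the previous allocation. A self-contained sketch of the same scheme, with hypothetical names and -1 standing in for -EBUSY:

#include <stdint.h>
#include <stdio.h>

struct tlabel_pool {
	uint64_t mask;        /* bit n set => label n in flight */
	int current;          /* next label to try */
};

/* Hand out a 6-bit label, scanning forward from the last one allocated so
 * labels are reused as late as possible.  Returns -1 when all 64 are busy. */
static int tlabel_alloc(struct tlabel_pool *p)
{
	int tlabel = p->current;

	while (p->mask & ((uint64_t)1 << tlabel)) {
		tlabel = (tlabel + 1) & 0x3f;
		if (tlabel == p->current)
			return -1;            /* all 64 labels busy */
	}
	p->current = (tlabel + 1) & 0x3f;
	p->mask |= (uint64_t)1 << tlabel;
	return tlabel;
}

static void tlabel_free(struct tlabel_pool *p, int tlabel)
{
	p->mask &= ~((uint64_t)1 << tlabel);
}

int main(void)
{
	struct tlabel_pool pool = { 0, 0 };
	int a = tlabel_alloc(&pool);          /* 0 */
	int b = tlabel_alloc(&pool);          /* 1 */

	tlabel_free(&pool, a);
	printf("%d %d %d\n", a, b, tlabel_alloc(&pool));   /* 0 1 2 */
	return 0;
}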
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index fb0321300cce..0ecfcd95f4c5 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -27,7 +27,12 @@ struct fw_packet;
#define PHY_LINK_ACTIVE 0x80
#define PHY_CONTENDER 0x40
#define PHY_BUS_RESET 0x40
+#define PHY_EXTENDED_REGISTERS 0xe0
#define PHY_BUS_SHORT_RESET 0x40
+#define PHY_INT_STATUS_BITS 0x3c
+#define PHY_ENABLE_ACCEL 0x02
+#define PHY_ENABLE_MULTI 0x01
+#define PHY_PAGE_SELECT 0xe0
#define BANDWIDTH_AVAILABLE_INITIAL 4915
#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
@@ -215,7 +220,6 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
void fw_fill_response(struct fw_packet *response, u32 *request_header,
int rcode, void *payload, size_t length);
-void fw_flush_transactions(struct fw_card *card);
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index a3b083a7403a..9f627e758cfc 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -236,13 +236,15 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define QUIRK_CYCLE_TIMER 1
#define QUIRK_RESET_PACKET 2
#define QUIRK_BE_HEADERS 4
+#define QUIRK_NO_1394A 8
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
unsigned short vendor, device, flags;
} ohci_quirks[] = {
{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER |
- QUIRK_RESET_PACKET},
+ QUIRK_RESET_PACKET |
+ QUIRK_NO_1394A},
{PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
{PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
@@ -257,15 +259,16 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
+ ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
")");
-#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
-
#define OHCI_PARAM_DEBUG_AT_AR 1
#define OHCI_PARAM_DEBUG_SELFIDS 2
#define OHCI_PARAM_DEBUG_IRQS 4
#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
+#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
+
static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
@@ -438,9 +441,10 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
#else
-#define log_irqs(evt)
-#define log_selfids(node_id, generation, self_id_count, sid)
-#define log_ar_at_event(dir, speed, header, evt)
+#define param_debug 0
+static inline void log_irqs(u32 evt) {}
+static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {}
+static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {}
#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
@@ -460,27 +464,71 @@ static inline void flush_writes(const struct fw_ohci *ohci)
reg_read(ohci, OHCI1394_Version);
}
-static int ohci_update_phy_reg(struct fw_card *card, int addr,
- int clear_bits, int set_bits)
+static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
- struct fw_ohci *ohci = fw_ohci(card);
- u32 val, old;
+ u32 val;
+ int i;
reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
- flush_writes(ohci);
- msleep(2);
- val = reg_read(ohci, OHCI1394_PhyControl);
- if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
- fw_error("failed to set phy reg bits.\n");
- return -EBUSY;
+ for (i = 0; i < 10; i++) {
+ val = reg_read(ohci, OHCI1394_PhyControl);
+ if (val & OHCI1394_PhyControl_ReadDone)
+ return OHCI1394_PhyControl_ReadData(val);
+
+ msleep(1);
}
+ fw_error("failed to read phy reg\n");
+
+ return -EBUSY;
+}
+
+static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
+{
+ int i;
- old = OHCI1394_PhyControl_ReadData(val);
- old = (old & ~clear_bits) | set_bits;
reg_write(ohci, OHCI1394_PhyControl,
- OHCI1394_PhyControl_Write(addr, old));
+ OHCI1394_PhyControl_Write(addr, val));
+ for (i = 0; i < 100; i++) {
+ val = reg_read(ohci, OHCI1394_PhyControl);
+ if (!(val & OHCI1394_PhyControl_WritePending))
+ return 0;
- return 0;
+ msleep(1);
+ }
+ fw_error("failed to write phy reg\n");
+
+ return -EBUSY;
+}
+
+static int ohci_update_phy_reg(struct fw_card *card, int addr,
+ int clear_bits, int set_bits)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+ int ret;
+
+ ret = read_phy_reg(ohci, addr);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The interrupt status bits are cleared by writing a one bit.
+ * Avoid clearing them unless explicitly requested in set_bits.
+ */
+ if (addr == 5)
+ clear_bits |= PHY_INT_STATUS_BITS;
+
+ return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
+}
+
+static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
+{
+ int ret;
+
+ ret = ohci_update_phy_reg(&ohci->card, 7, PHY_PAGE_SELECT, page << 5);
+ if (ret < 0)
+ return ret;
+
+ return read_phy_reg(ohci, addr);
}
static int ar_context_add_page(struct ar_context *ctx)
@@ -1495,13 +1543,64 @@ static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
}
+static int configure_1394a_enhancements(struct fw_ohci *ohci)
+{
+ bool enable_1394a;
+ int ret, clear, set, offset;
+
+ /* Check if the driver should configure link and PHY. */
+ if (!(reg_read(ohci, OHCI1394_HCControlSet) &
+ OHCI1394_HCControl_programPhyEnable))
+ return 0;
+
+ /* Paranoia: check whether the PHY supports 1394a, too. */
+ enable_1394a = false;
+ ret = read_phy_reg(ohci, 2);
+ if (ret < 0)
+ return ret;
+ if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
+ ret = read_paged_phy_reg(ohci, 1, 8);
+ if (ret < 0)
+ return ret;
+ if (ret >= 1)
+ enable_1394a = true;
+ }
+
+ if (ohci->quirks & QUIRK_NO_1394A)
+ enable_1394a = false;
+
+ /* Configure PHY and link consistently. */
+ if (enable_1394a) {
+ clear = 0;
+ set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
+ } else {
+ clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
+ set = 0;
+ }
+ ret = ohci_update_phy_reg(&ohci->card, 5, clear, set);
+ if (ret < 0)
+ return ret;
+
+ if (enable_1394a)
+ offset = OHCI1394_HCControlSet;
+ else
+ offset = OHCI1394_HCControlClear;
+ reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
+
+ /* Clean up: configuration has been taken care of. */
+ reg_write(ohci, OHCI1394_HCControlClear,
+ OHCI1394_HCControl_programPhyEnable);
+
+ return 0;
+}
+
static int ohci_enable(struct fw_card *card,
const __be32 *config_rom, size_t length)
{
struct fw_ohci *ohci = fw_ohci(card);
struct pci_dev *dev = to_pci_dev(card->device);
u32 lps;
- int i;
+ int i, ret;
if (software_reset(ohci)) {
fw_error("Failed to reset ohci card.\n");
@@ -1565,10 +1664,14 @@ static int ohci_enable(struct fw_card *card,
if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
+ ret = configure_1394a_enhancements(ohci);
+ if (ret < 0)
+ return ret;
+
/* Activate link_on bit and contender bit in our self ID packets.*/
- if (ohci_update_phy_reg(card, 4, 0,
- PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
- return -EIO;
+ ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
+ if (ret < 0)
+ return ret;
/*
* When the link is not yet enabled, the atomic config rom
@@ -2304,7 +2407,7 @@ static const struct fw_card_driver ohci_driver = {
};
#ifdef CONFIG_PPC_PMAC
-static void ohci_pmac_on(struct pci_dev *dev)
+static void pmac_ohci_on(struct pci_dev *dev)
{
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(dev);
@@ -2316,7 +2419,7 @@ static void ohci_pmac_on(struct pci_dev *dev)
}
}
-static void ohci_pmac_off(struct pci_dev *dev)
+static void pmac_ohci_off(struct pci_dev *dev)
{
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(dev);
@@ -2328,15 +2431,15 @@ static void ohci_pmac_off(struct pci_dev *dev)
}
}
#else
-#define ohci_pmac_on(dev)
-#define ohci_pmac_off(dev)
+static inline void pmac_ohci_on(struct pci_dev *dev) {}
+static inline void pmac_ohci_off(struct pci_dev *dev) {}
#endif /* CONFIG_PPC_PMAC */
static int __devinit pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
struct fw_ohci *ohci;
- u32 bus_options, max_receive, link_speed, version;
+ u32 bus_options, max_receive, link_speed, version, link_enh;
u64 guid;
int i, err, n_ir, n_it;
size_t size;
@@ -2349,7 +2452,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
- ohci_pmac_on(dev);
+ pmac_ohci_on(dev);
err = pci_enable_device(dev);
if (err) {
@@ -2389,6 +2492,23 @@ static int __devinit pci_probe(struct pci_dev *dev,
if (param_quirks)
ohci->quirks = param_quirks;
+ /* TI OHCI-Lynx and compatible: set recommended configuration bits. */
+ if (dev->vendor == PCI_VENDOR_ID_TI) {
+ pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh);
+
+ /* adjust latency of ATx FIFO: use 1.7 KB threshold */
+ link_enh &= ~TI_LinkEnh_atx_thresh_mask;
+ link_enh |= TI_LinkEnh_atx_thresh_1_7K;
+
+ /* use priority arbitration for asynchronous responses */
+ link_enh |= TI_LinkEnh_enab_unfair;
+
+ /* required for aPhyEnhanceEnable to work */
+ link_enh |= TI_LinkEnh_enab_accel;
+
+ pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh);
+ }
+
ar_context_init(&ohci->ar_request_ctx, ohci,
OHCI1394_AsReqRcvContextControlSet);
@@ -2466,7 +2586,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
pci_disable_device(dev);
fail_free:
kfree(&ohci->card);
- ohci_pmac_off(dev);
+ pmac_ohci_off(dev);
fail:
if (err == -ENOMEM)
fw_error("Out of memory\n");
@@ -2509,7 +2629,7 @@ static void pci_remove(struct pci_dev *dev)
pci_release_region(dev, 0);
pci_disable_device(dev);
kfree(&ohci->card);
- ohci_pmac_off(dev);
+ pmac_ohci_off(dev);
fw_notify("Removed fw-ohci device.\n");
}
@@ -2530,7 +2650,7 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state)
err = pci_set_power_state(dev, pci_choose_state(dev, state));
if (err)
fw_error("pci_set_power_state failed with %d\n", err);
- ohci_pmac_off(dev);
+ pmac_ohci_off(dev);
return 0;
}
@@ -2540,7 +2660,7 @@ static int pci_resume(struct pci_dev *dev)
struct fw_ohci *ohci = pci_get_drvdata(dev);
int err;
- ohci_pmac_on(dev);
+ pmac_ohci_on(dev);
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
err = pci_enable_device(dev);
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h
index ba492d85c516..3bc9a5d744eb 100644
--- a/drivers/firewire/ohci.h
+++ b/drivers/firewire/ohci.h
@@ -67,7 +67,7 @@
#define OHCI1394_PhyControl_ReadDone 0x80000000
#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16)
#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000)
-#define OHCI1394_PhyControl_WriteDone 0x00004000
+#define OHCI1394_PhyControl_WritePending 0x00004000
#define OHCI1394_IsochronousCycleTimer 0x0F0
#define OHCI1394_AsReqFilterHiSet 0x100
#define OHCI1394_AsReqFilterHiClear 0x104
@@ -154,4 +154,12 @@
#define OHCI1394_phy_tcode 0xe
+/* TI extensions */
+
+#define PCI_CFG_TI_LinkEnh 0xf4
+#define TI_LinkEnh_enab_accel 0x00000002
+#define TI_LinkEnh_enab_unfair 0x00000080
+#define TI_LinkEnh_atx_thresh_mask 0x00003000
+#define TI_LinkEnh_atx_thresh_1_7K 0x00001000
+
#endif /* _FIREWIRE_OHCI_H */
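One subtlety in the new PHY accessors is the register 5 handling in ohci_update_phy_reg(): the PHY_INT_STATUS_BITS are write-one-to-clear, so a read-modify-write must strip them from the value written back unless the caller explicitly asked to clear them. A small standalone sketch of that computation (hypothetical helper name):

#include <stdint.h>
#include <stdio.h>

#define PHY_INT_STATUS_BITS 0x3c          /* write-one-to-clear bits in reg 5 */

/* What ohci_update_phy_reg() writes back for register 5: the requested
 * read-modify-write, with the write-one-to-clear status bits stripped so a
 * plain update does not acknowledge (and lose) pending PHY events. */
static uint8_t reg5_update(uint8_t current, uint8_t clear_bits, uint8_t set_bits)
{
	clear_bits |= PHY_INT_STATUS_BITS;
	return (current & ~clear_bits) | set_bits;
}

int main(void)
{
	/* current = 0x16: one enable bit plus two pending status bits */
	printf("written = 0x%02x\n", reg5_update(0x16, 0, 0x01));  /* 0x03 */
	return 0;
}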
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index fee678f74a19..724038dab4ca 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -139,6 +139,13 @@ config GPIO_MAX732X
Board setup code must specify the model to use, and the start
number for these GPIOs.
+config GPIO_MAX732X_IRQ
+ bool "Interrupt controller support for MAX732x"
+ depends on GPIO_MAX732X=y && GENERIC_HARDIRQS
+ help
+ Say yes here to enable the max732x to be used as an interrupt
+ controller. It requires the driver to be built into the kernel.
+
config GPIO_PCA953X
tristate "PCA953x, PCA955x, TCA64xx, and MAX7310 I/O ports"
depends on I2C
@@ -188,6 +195,13 @@ config GPIO_PCF857X
This driver provides an in-kernel interface to those GPIOs using
platform-neutral GPIO calls.
+config GPIO_TC35892
+ bool "TC35892 GPIOs"
+ depends on MFD_TC35892
+ help
+ This enables support for the GPIOs found on the TC35892
+ I/O Expander.
+
config GPIO_TWL4030
tristate "TWL4030, TWL5030, and TPS659x0 GPIOs"
depends on TWL4030_CORE
@@ -264,10 +278,10 @@ config GPIO_BT8XX
If unsure, say N.
config GPIO_LANGWELL
- bool "Intel Moorestown Platform Langwell GPIO support"
+ bool "Intel Langwell/Penwell GPIO support"
depends on PCI
help
- Say Y here to support Intel Moorestown platform GPIO.
+ Say Y here to support Intel Langwell/Penwell GPIO.
config GPIO_TIMBERDALE
bool "Support for timberdale GPIO IP"
@@ -275,6 +289,15 @@ config GPIO_TIMBERDALE
---help---
Add support for the GPIO IP in the timberdale FPGA.
+config GPIO_RDC321X
+ tristate "RDC R-321x GPIO support"
+ depends on PCI && GPIOLIB
+ select MFD_CORE
+ select MFD_RDC321X
+ help
+ Support for the RDC R321x SoC GPIOs over southbridge
+ PCI configuration space.
+
comment "SPI GPIO expanders:"
config GPIO_MAX7301
@@ -310,4 +333,14 @@ config GPIO_UCB1400
To compile this driver as a module, choose M here: the
module will be called ucb1400_gpio.
+comment "MODULbus GPIO expanders:"
+
+config GPIO_JANZ_TTL
+ tristate "Janz VMOD-TTL Digital IO Module"
+ depends on MFD_JANZ_CMODIO
+ help
+ This enables support for the Janz VMOD-TTL Digital IO module.
+ This driver provides support for driving the pins in output
+ mode only. Input mode is not supported.
+
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 10f3f8d958b1..51c3cdd41b5a 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
obj-$(CONFIG_GPIO_PL061) += pl061.o
+obj-$(CONFIG_GPIO_TC35892) += tc35892-gpio.o
obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o
@@ -27,4 +28,6 @@ obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o
obj-$(CONFIG_GPIO_WM8994) += wm8994-gpio.o
-obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
\ No newline at end of file
+obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
+obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o
+obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
index 0c3c498f2260..f73a1555e49d 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/cs5535-gpio.c
@@ -197,7 +197,7 @@ static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val)
return 0;
}
-static char *cs5535_gpio_names[] = {
+static const char * const cs5535_gpio_names[] = {
"GPIO0", "GPIO1", "GPIO2", "GPIO3",
"GPIO4", "GPIO5", "GPIO6", "GPIO7",
"GPIO8", "GPIO9", "GPIO10", "GPIO11",
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index cae1b8c5b08c..3ca36542e338 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -722,7 +722,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
unsigned long flags;
struct gpio_desc *desc;
int status = -EINVAL;
- char *ioname = NULL;
+ const char *ioname = NULL;
/* can't export until sysfs is available ... */
if (!gpio_class.p) {
@@ -753,7 +753,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
struct device *dev;
dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
- desc, ioname ? ioname : "gpio%d", gpio);
+ desc, ioname ? ioname : "gpio%u", gpio);
if (!IS_ERR(dev)) {
status = sysfs_create_group(&dev->kobj,
&gpio_attr_group);
@@ -1106,7 +1106,7 @@ unlock:
fail:
/* failures here can mean systems won't boot... */
if (status)
- pr_err("gpiochip_add: gpios %d..%d (%s) not registered\n",
+ pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n",
chip->base, chip->base + chip->ngpio - 1,
chip->label ? : "generic");
return status;
@@ -1447,6 +1447,49 @@ fail:
}
EXPORT_SYMBOL_GPL(gpio_direction_output);
+/**
+ * gpio_set_debounce - sets @debounce time for a @gpio
+ * @gpio: the gpio to set debounce time
+ * @debounce: debounce time in microseconds
+ */
+int gpio_set_debounce(unsigned gpio, unsigned debounce)
+{
+ unsigned long flags;
+ struct gpio_chip *chip;
+ struct gpio_desc *desc = &gpio_desc[gpio];
+ int status = -EINVAL;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ if (!gpio_is_valid(gpio))
+ goto fail;
+ chip = desc->chip;
+ if (!chip || !chip->set || !chip->set_debounce)
+ goto fail;
+ gpio -= chip->base;
+ if (gpio >= chip->ngpio)
+ goto fail;
+ status = gpio_ensure_requested(desc, gpio);
+ if (status < 0)
+ goto fail;
+
+ /* now we know the gpio is valid and chip won't vanish */
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ might_sleep_if(extra_checks && chip->can_sleep);
+
+ return chip->set_debounce(chip, gpio, debounce);
+
+fail:
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ if (status)
+ pr_debug("%s: gpio-%d status %d\n",
+ __func__, gpio, status);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpio_set_debounce);
/* I/O calls are only valid after configuration completed; the relevant
* "is this a valid GPIO" error checks should already have been done.
diff --git a/drivers/gpio/it8761e_gpio.c b/drivers/gpio/it8761e_gpio.c
index 41a9388f2fde..48fc43c4bdd1 100644
--- a/drivers/gpio/it8761e_gpio.c
+++ b/drivers/gpio/it8761e_gpio.c
@@ -217,7 +217,10 @@ gpiochip_add_err:
static void __exit it8761e_gpio_exit(void)
{
if (gpio_ba) {
- gpiochip_remove(&it8761e_gpio_chip);
+ int ret = gpiochip_remove(&it8761e_gpio_chip);
+
+ WARN(ret, "%s(): gpiochip_remove() failed, ret=%d\n",
+ __func__, ret);
release_region(gpio_ba, GPIO_IOSIZE);
gpio_ba = 0;
diff --git a/drivers/gpio/janz-ttl.c b/drivers/gpio/janz-ttl.c
new file mode 100644
index 000000000000..813ac077e5d7
--- /dev/null
+++ b/drivers/gpio/janz-ttl.c
@@ -0,0 +1,258 @@
+/*
+ * Janz MODULbus VMOD-TTL GPIO Driver
+ *
+ * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/janz.h>
+
+#define DRV_NAME "janz-ttl"
+
+#define PORTA_DIRECTION 0x23
+#define PORTB_DIRECTION 0x2B
+#define PORTC_DIRECTION 0x06
+#define PORTA_IOCTL 0x24
+#define PORTB_IOCTL 0x2C
+#define PORTC_IOCTL 0x07
+
+#define MASTER_INT_CTL 0x00
+#define MASTER_CONF_CTL 0x01
+
+#define CONF_PAE (1 << 2)
+#define CONF_PBE (1 << 7)
+#define CONF_PCE (1 << 4)
+
+struct ttl_control_regs {
+ __be16 portc;
+ __be16 portb;
+ __be16 porta;
+ __be16 control;
+};
+
+struct ttl_module {
+ struct gpio_chip gpio;
+
+ /* base address of registers */
+ struct ttl_control_regs __iomem *regs;
+
+ u8 portc_shadow;
+ u8 portb_shadow;
+ u8 porta_shadow;
+
+ spinlock_t lock;
+};
+
+static int ttl_get_value(struct gpio_chip *gpio, unsigned offset)
+{
+ struct ttl_module *mod = dev_get_drvdata(gpio->dev);
+ u8 *shadow;
+ int ret;
+
+ if (offset < 8) {
+ shadow = &mod->porta_shadow;
+ } else if (offset < 16) {
+ shadow = &mod->portb_shadow;
+ offset -= 8;
+ } else {
+ shadow = &mod->portc_shadow;
+ offset -= 16;
+ }
+
+ spin_lock(&mod->lock);
+ ret = *shadow & (1 << offset);
+ spin_unlock(&mod->lock);
+ return ret;
+}
+
+static void ttl_set_value(struct gpio_chip *gpio, unsigned offset, int value)
+{
+ struct ttl_module *mod = dev_get_drvdata(gpio->dev);
+ void __iomem *port;
+ u8 *shadow;
+
+ if (offset < 8) {
+ port = &mod->regs->porta;
+ shadow = &mod->porta_shadow;
+ } else if (offset < 16) {
+ port = &mod->regs->portb;
+ shadow = &mod->portb_shadow;
+ offset -= 8;
+ } else {
+ port = &mod->regs->portc;
+ shadow = &mod->portc_shadow;
+ offset -= 16;
+ }
+
+ spin_lock(&mod->lock);
+ if (value)
+ *shadow |= (1 << offset);
+ else
+ *shadow &= ~(1 << offset);
+
+ iowrite16be(*shadow, port);
+ spin_unlock(&mod->lock);
+}
+
+static void __devinit ttl_write_reg(struct ttl_module *mod, u8 reg, u16 val)
+{
+ iowrite16be(reg, &mod->regs->control);
+ iowrite16be(val, &mod->regs->control);
+}
+
+static void __devinit ttl_setup_device(struct ttl_module *mod)
+{
+ /* reset the device to a known state */
+ iowrite16be(0x0000, &mod->regs->control);
+ iowrite16be(0x0001, &mod->regs->control);
+ iowrite16be(0x0000, &mod->regs->control);
+
+ /* put all ports in open-drain mode */
+ ttl_write_reg(mod, PORTA_IOCTL, 0x00ff);
+ ttl_write_reg(mod, PORTB_IOCTL, 0x00ff);
+ ttl_write_reg(mod, PORTC_IOCTL, 0x000f);
+
+ /* set all ports as outputs */
+ ttl_write_reg(mod, PORTA_DIRECTION, 0x0000);
+ ttl_write_reg(mod, PORTB_DIRECTION, 0x0000);
+ ttl_write_reg(mod, PORTC_DIRECTION, 0x0000);
+
+ /* set all ports to drive zeroes */
+ iowrite16be(0x0000, &mod->regs->porta);
+ iowrite16be(0x0000, &mod->regs->portb);
+ iowrite16be(0x0000, &mod->regs->portc);
+
+ /* enable all ports */
+ ttl_write_reg(mod, MASTER_CONF_CTL, CONF_PAE | CONF_PBE | CONF_PCE);
+}
+
+static int __devinit ttl_probe(struct platform_device *pdev)
+{
+ struct janz_platform_data *pdata;
+ struct device *dev = &pdev->dev;
+ struct ttl_module *mod;
+ struct gpio_chip *gpio;
+ struct resource *res;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(dev, "no platform data\n");
+ ret = -ENXIO;
+ goto out_return;
+ }
+
+ mod = kzalloc(sizeof(*mod), GFP_KERNEL);
+ if (!mod) {
+ dev_err(dev, "unable to allocate private data\n");
+ ret = -ENOMEM;
+ goto out_return;
+ }
+
+ platform_set_drvdata(pdev, mod);
+ spin_lock_init(&mod->lock);
+
+ /* get access to the MODULbus registers for this module */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "MODULbus registers not found\n");
+ ret = -ENODEV;
+ goto out_free_mod;
+ }
+
+ mod->regs = ioremap(res->start, resource_size(res));
+ if (!mod->regs) {
+ dev_err(dev, "MODULbus registers not ioremap\n");
+ ret = -ENOMEM;
+ goto out_free_mod;
+ }
+
+ ttl_setup_device(mod);
+
+ /* Initialize the GPIO data structures */
+ gpio = &mod->gpio;
+ gpio->dev = &pdev->dev;
+ gpio->label = pdev->name;
+ gpio->get = ttl_get_value;
+ gpio->set = ttl_set_value;
+ gpio->owner = THIS_MODULE;
+
+ /* request dynamic allocation */
+ gpio->base = -1;
+ gpio->ngpio = 20;
+
+ ret = gpiochip_add(gpio);
+ if (ret) {
+ dev_err(dev, "unable to add GPIO chip\n");
+ goto out_iounmap_regs;
+ }
+
+ dev_info(&pdev->dev, "module %d: registered GPIO device\n",
+ pdata->modno);
+ return 0;
+
+out_iounmap_regs:
+ iounmap(mod->regs);
+out_free_mod:
+ kfree(mod);
+out_return:
+ return ret;
+}
+
+static int __devexit ttl_remove(struct platform_device *pdev)
+{
+ struct ttl_module *mod = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = gpiochip_remove(&mod->gpio);
+ if (ret) {
+ dev_err(dev, "unable to remove GPIO chip\n");
+ return ret;
+ }
+
+ iounmap(mod->regs);
+ kfree(mod);
+ return 0;
+}
+
+static struct platform_driver ttl_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ttl_probe,
+ .remove = __devexit_p(ttl_remove),
+};
+
+static int __init ttl_init(void)
+{
+ return platform_driver_register(&ttl_driver);
+}
+
+static void __exit ttl_exit(void)
+{
+ platform_driver_unregister(&ttl_driver);
+}
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
+MODULE_DESCRIPTION("Janz MODULbus VMOD-TTL Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:janz-ttl");
+
+module_init(ttl_init);
+module_exit(ttl_exit);
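ttl_set_value() above updates a cached shadow byte and then pushes the whole port out with a single iowrite16be(), since individual pins cannot be written separately; ttl_get_value() reads the cache rather than the hardware. A standalone sketch of that shadow-register pattern (hypothetical names, a plain variable standing in for the MMIO port):

#include <stdint.h>
#include <stdio.h>

static uint16_t port;                     /* pretend MMIO data register */
static uint16_t shadow;                   /* driver's cached copy */

/* Update one bit in the cached copy, then write the whole word out. */
static void set_pin(unsigned offset, int value)
{
	if (value)
		shadow |= 1u << offset;
	else
		shadow &= ~(1u << offset);
	port = shadow;                    /* single whole-word write */
}

/* Read back from the cache, not from the (output-only) port. */
static int get_pin(unsigned offset)
{
	return !!(shadow & (1u << offset));
}

int main(void)
{
	set_pin(3, 1);
	set_pin(5, 1);
	set_pin(3, 0);
	printf("port = 0x%04x, pin5 = %d\n", port, get_pin(5));  /* 0x0020, 1 */
	return 0;
}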
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 00c3a14127af..8383a8d7f994 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -17,6 +17,7 @@
/* Supports:
* Moorestown platform Langwell chip.
+ * Medfield platform Penwell chip.
*/
#include <linux/module.h>
@@ -31,44 +32,65 @@
#include <linux/gpio.h>
#include <linux/slab.h>
-struct lnw_gpio_register {
- u32 GPLR[2];
- u32 GPDR[2];
- u32 GPSR[2];
- u32 GPCR[2];
- u32 GRER[2];
- u32 GFER[2];
- u32 GEDR[2];
+/*
+ * The Langwell chip has 64 pins, so 2 32-bit registers control each feature,
+ * while the Penwell chip has 96 pins per block and needs 3 32-bit registers.
+ * We therefore only define the register order here instead of a structure,
+ * to get a bit offset for a pin (using GPDR as an example):
+ *
+ * nreg = ngpio / 32;
+ * reg = offset / 32;
+ * bit = offset % 32;
+ * reg_addr = reg_base + GPDR * nreg * 4 + reg * 4;
+ *
+ * so that bit of the register at reg_addr controls the GPDR feature of
+ * pin 'offset'.
+ */
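+
+/*
+ * Worked example (illustrative only, not part of the original patch):
+ * on Penwell ngpio = 96, so nreg = 3.  For pin offset 70 and the GPDR
+ * feature: reg = 70 / 32 = 2, bit = 70 % 32 = 6, and
+ * reg_addr = reg_base + GPDR * 3 * 4 + 2 * 4 = reg_base + 20,
+ * so bit 6 of that register holds the pin's direction.
+ */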
+
+enum GPIO_REG {
+ GPLR = 0, /* pin level read-only */
+ GPDR, /* pin direction */
+ GPSR, /* pin set */
+ GPCR, /* pin clear */
+ GRER, /* rising edge detect */
+ GFER, /* falling edge detect */
+ GEDR, /* edge detect result */
};
struct lnw_gpio {
struct gpio_chip chip;
- struct lnw_gpio_register *reg_base;
+ void *reg_base;
spinlock_t lock;
unsigned irq_base;
};
-static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset)
+static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
+ enum GPIO_REG reg_type)
{
struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ unsigned nreg = chip->ngpio / 32;
u8 reg = offset / 32;
- void __iomem *gplr;
+ void __iomem *ptr;
+
+ ptr = (void __iomem *)(lnw->reg_base + reg_type * nreg * 4 + reg * 4);
+ return ptr;
+}
+
+static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ void __iomem *gplr = gpio_reg(chip, offset, GPLR);
- gplr = (void __iomem *)(&lnw->reg_base->GPLR[reg]);
return readl(gplr) & BIT(offset % 32);
}
static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
- struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
- u8 reg = offset / 32;
void __iomem *gpsr, *gpcr;
if (value) {
- gpsr = (void __iomem *)(&lnw->reg_base->GPSR[reg]);
+ gpsr = gpio_reg(chip, offset, GPSR);
writel(BIT(offset % 32), gpsr);
} else {
- gpcr = (void __iomem *)(&lnw->reg_base->GPCR[reg]);
+ gpcr = gpio_reg(chip, offset, GPCR);
writel(BIT(offset % 32), gpcr);
}
}
@@ -76,12 +98,10 @@ static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
- u8 reg = offset / 32;
+ void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
u32 value;
unsigned long flags;
- void __iomem *gpdr;
- gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
spin_lock_irqsave(&lnw->lock, flags);
value = readl(gpdr);
value &= ~BIT(offset % 32);
@@ -94,12 +114,10 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
- u8 reg = offset / 32;
+ void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
unsigned long flags;
- void __iomem *gpdr;
lnw_gpio_set(chip, offset, value);
- gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
spin_lock_irqsave(&lnw->lock, flags);
value = readl(gpdr);
value |= BIT(offset % 32);
@@ -118,11 +136,10 @@ static int lnw_irq_type(unsigned irq, unsigned type)
{
struct lnw_gpio *lnw = get_irq_chip_data(irq);
u32 gpio = irq - lnw->irq_base;
- u8 reg = gpio / 32;
unsigned long flags;
u32 value;
- void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]);
- void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]);
+ void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
+ void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
if (gpio >= lnw->chip.ngpio)
return -EINVAL;
@@ -158,8 +175,10 @@ static struct irq_chip lnw_irqchip = {
.set_type = lnw_irq_type,
};
-static struct pci_device_id lnw_gpio_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f) },
+static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = { /* pin number */
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f), .driver_data = 64 },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f), .driver_data = 96 },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a), .driver_data = 96 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
@@ -167,17 +186,17 @@ MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
{
struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq);
- u32 reg, gpio;
+ u32 base, gpio;
void __iomem *gedr;
u32 gedr_v;
/* check the GPIO controller to see which pin triggered the interrupt */
- for (reg = 0; reg < lnw->chip.ngpio / 32; reg++) {
- gedr = (void __iomem *)(&lnw->reg_base->GEDR[reg]);
+ for (base = 0; base < lnw->chip.ngpio; base += 32) {
+ gedr = gpio_reg(&lnw->chip, base, GEDR);
gedr_v = readl(gedr);
if (!gedr_v)
continue;
- for (gpio = reg*32; gpio < reg*32+32; gpio++)
+ for (gpio = base; gpio < base + 32; gpio++)
if (gedr_v & BIT(gpio % 32)) {
pr_debug("pin %d triggered\n", gpio);
generic_handle_irq(lnw->irq_base + gpio);
@@ -245,7 +264,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
lnw->chip.set = lnw_gpio_set;
lnw->chip.to_irq = lnw_gpio_to_irq;
lnw->chip.base = gpio_base;
- lnw->chip.ngpio = 64;
+ lnw->chip.ngpio = id->driver_data;
lnw->chip.can_sleep = 0;
pci_set_drvdata(pdev, lnw);
retval = gpiochip_add(&lnw->chip);
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c
index f7868243af89..9cad60f9e962 100644
--- a/drivers/gpio/max732x.c
+++ b/drivers/gpio/max732x.c
@@ -17,7 +17,8 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/gpio.h>
-
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/i2c/max732x.h>
@@ -31,7 +32,8 @@
* - Open Drain I/O
*
* designated by 'O', 'I' and 'P' respectively according to MAXIM's
- * datasheets.
+ * datasheets. 'I' and 'P' ports are interrupt capable, some with
+ * a dedicated interrupt mask.
*
* There are two groups of I/O ports, each group usually includes
* up to 8 I/O ports, and is accessed by a specific I2C address:
@@ -44,7 +46,8 @@
*
* Within each group of ports, there are five known combinations of
* I/O ports: 4I4O, 4P4O, 8I, 8P, 8O, see the definitions below for
- * the detailed organization of these ports.
+ * the detailed organization of these ports. Only Group A is interrupt
+ * capable.
*
* GPIO numbers start from 'gpio_base + 0' to 'gpio_base + 8/16',
* and GPIOs from GROUP_A are numbered before those from GROUP_B
@@ -68,16 +71,47 @@
#define GROUP_A(x) ((x) & 0xffff) /* I2C Addr: 0b'110xxxx */
#define GROUP_B(x) ((x) << 16) /* I2C Addr: 0b'101xxxx */
+#define INT_NONE 0x0 /* No interrupt capability */
+#define INT_NO_MASK 0x1 /* Has interrupts, no mask */
+#define INT_INDEP_MASK 0x2 /* Has interrupts, independent mask */
+#define INT_MERGED_MASK 0x3 /* Has interrupts, merged mask */
+
+#define INT_CAPS(x) (((uint64_t)(x)) << 32)
+
+enum {
+ MAX7319,
+ MAX7320,
+ MAX7321,
+ MAX7322,
+ MAX7323,
+ MAX7324,
+ MAX7325,
+ MAX7326,
+ MAX7327,
+};
+
+static uint64_t max732x_features[] = {
+ [MAX7319] = GROUP_A(IO_8I) | INT_CAPS(INT_MERGED_MASK),
+ [MAX7320] = GROUP_B(IO_8O),
+ [MAX7321] = GROUP_A(IO_8P) | INT_CAPS(INT_NO_MASK),
+ [MAX7322] = GROUP_A(IO_4I4O) | INT_CAPS(INT_MERGED_MASK),
+ [MAX7323] = GROUP_A(IO_4P4O) | INT_CAPS(INT_INDEP_MASK),
+ [MAX7324] = GROUP_A(IO_8I) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK),
+ [MAX7325] = GROUP_A(IO_8P) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK),
+ [MAX7326] = GROUP_A(IO_4I4O) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK),
+ [MAX7327] = GROUP_A(IO_4P4O) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK),
+};
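+
+/*
+ * Illustrative decode (not part of the original patch): for MAX7324 the
+ * low 32 bits hold the port layout, GROUP_A(IO_8I) | GROUP_B(IO_8O),
+ * while bits above 31 hold INT_CAPS(INT_MERGED_MASK); max732x_irq_setup()
+ * recovers the interrupt capability again with a 32-bit right shift.
+ */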
+
static const struct i2c_device_id max732x_id[] = {
- { "max7319", GROUP_A(IO_8I) },
- { "max7320", GROUP_B(IO_8O) },
- { "max7321", GROUP_A(IO_8P) },
- { "max7322", GROUP_A(IO_4I4O) },
- { "max7323", GROUP_A(IO_4P4O) },
- { "max7324", GROUP_A(IO_8I) | GROUP_B(IO_8O) },
- { "max7325", GROUP_A(IO_8P) | GROUP_B(IO_8O) },
- { "max7326", GROUP_A(IO_4I4O) | GROUP_B(IO_8O) },
- { "max7327", GROUP_A(IO_4P4O) | GROUP_B(IO_8O) },
+ { "max7319", MAX7319 },
+ { "max7320", MAX7320 },
+ { "max7321", MAX7321 },
+ { "max7322", MAX7322 },
+ { "max7323", MAX7323 },
+ { "max7324", MAX7324 },
+ { "max7325", MAX7325 },
+ { "max7326", MAX7326 },
+ { "max7327", MAX7327 },
{ },
};
MODULE_DEVICE_TABLE(i2c, max732x_id);
@@ -96,9 +130,19 @@ struct max732x_chip {
struct mutex lock;
uint8_t reg_out[2];
+
+#ifdef CONFIG_GPIO_MAX732X_IRQ
+ struct mutex irq_lock;
+ int irq_base;
+ uint8_t irq_mask;
+ uint8_t irq_mask_cur;
+ uint8_t irq_trig_raise;
+ uint8_t irq_trig_fall;
+ uint8_t irq_features;
+#endif
};
-static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val)
+static int max732x_writeb(struct max732x_chip *chip, int group_a, uint8_t val)
{
struct i2c_client *client;
int ret;
@@ -113,7 +157,7 @@ static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val)
return 0;
}
-static int max732x_read(struct max732x_chip *chip, int group_a, uint8_t *val)
+static int max732x_readb(struct max732x_chip *chip, int group_a, uint8_t *val)
{
struct i2c_client *client;
int ret;
@@ -142,7 +186,7 @@ static int max732x_gpio_get_value(struct gpio_chip *gc, unsigned off)
chip = container_of(gc, struct max732x_chip, gpio_chip);
- ret = max732x_read(chip, is_group_a(chip, off), &reg_val);
+ ret = max732x_readb(chip, is_group_a(chip, off), &reg_val);
if (ret < 0)
return 0;
@@ -162,7 +206,7 @@ static void max732x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
reg_out = (off > 7) ? chip->reg_out[1] : chip->reg_out[0];
reg_out = (val) ? reg_out | mask : reg_out & ~mask;
- ret = max732x_write(chip, is_group_a(chip, off), reg_out);
+ ret = max732x_writeb(chip, is_group_a(chip, off), reg_out);
if (ret < 0)
goto out;
@@ -188,6 +232,13 @@ static int max732x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
return -EACCES;
}
+ /*
+ * Open-drain pins must be set to high impedance (which is
+ * equivalent to output-high) to be turned into an input.
+ */
+ if ((mask & chip->dir_output))
+ max732x_gpio_set_value(gc, off, 1);
+
return 0;
}
@@ -209,12 +260,278 @@ static int max732x_gpio_direction_output(struct gpio_chip *gc,
return 0;
}
+#ifdef CONFIG_GPIO_MAX732X_IRQ
+static int max732x_writew(struct max732x_chip *chip, uint16_t val)
+{
+ int ret;
+
+ val = cpu_to_le16(val);
+
+ ret = i2c_master_send(chip->client_group_a, (char *)&val, 2);
+ if (ret < 0) {
+ dev_err(&chip->client_group_a->dev, "failed writing\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int max732x_readw(struct max732x_chip *chip, uint16_t *val)
+{
+ int ret;
+
+ ret = i2c_master_recv(chip->client_group_a, (char *)val, 2);
+ if (ret < 0) {
+ dev_err(&chip->client_group_a->dev, "failed reading\n");
+ return ret;
+ }
+
+ *val = le16_to_cpu(*val);
+ return 0;
+}
+
+static void max732x_irq_update_mask(struct max732x_chip *chip)
+{
+ uint16_t msg;
+
+ if (chip->irq_mask == chip->irq_mask_cur)
+ return;
+
+ chip->irq_mask = chip->irq_mask_cur;
+
+ if (chip->irq_features == INT_NO_MASK)
+ return;
+
+ mutex_lock(&chip->lock);
+
+ switch (chip->irq_features) {
+ case INT_INDEP_MASK:
+ msg = (chip->irq_mask << 8) | chip->reg_out[0];
+ max732x_writew(chip, msg);
+ break;
+
+ case INT_MERGED_MASK:
+ msg = chip->irq_mask | chip->reg_out[0];
+ max732x_writeb(chip, 1, (uint8_t)msg);
+ break;
+ }
+
+ mutex_unlock(&chip->lock);
+}
+
+static int max732x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
+{
+ struct max732x_chip *chip;
+
+ chip = container_of(gc, struct max732x_chip, gpio_chip);
+ return chip->irq_base + off;
+}
+
+static void max732x_irq_mask(unsigned int irq)
+{
+ struct max732x_chip *chip = get_irq_chip_data(irq);
+
+ chip->irq_mask_cur &= ~(1 << (irq - chip->irq_base));
+}
+
+static void max732x_irq_unmask(unsigned int irq)
+{
+ struct max732x_chip *chip = get_irq_chip_data(irq);
+
+ chip->irq_mask_cur |= 1 << (irq - chip->irq_base);
+}
+
+static void max732x_irq_bus_lock(unsigned int irq)
+{
+ struct max732x_chip *chip = get_irq_chip_data(irq);
+
+ mutex_lock(&chip->irq_lock);
+ chip->irq_mask_cur = chip->irq_mask;
+}
+
+static void max732x_irq_bus_sync_unlock(unsigned int irq)
+{
+ struct max732x_chip *chip = get_irq_chip_data(irq);
+
+ max732x_irq_update_mask(chip);
+ mutex_unlock(&chip->irq_lock);
+}
+
+static int max732x_irq_set_type(unsigned int irq, unsigned int type)
+{
+ struct max732x_chip *chip = get_irq_chip_data(irq);
+ uint16_t off = irq - chip->irq_base;
+ uint16_t mask = 1 << off;
+
+ if (!(mask & chip->dir_input)) {
+ dev_dbg(&chip->client->dev, "%s port %d is output only\n",
+ chip->client->name, off);
+ return -EACCES;
+ }
+
+ if (!(type & IRQ_TYPE_EDGE_BOTH)) {
+ dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
+ irq, type);
+ return -EINVAL;
+ }
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ chip->irq_trig_fall |= mask;
+ else
+ chip->irq_trig_fall &= ~mask;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ chip->irq_trig_raise |= mask;
+ else
+ chip->irq_trig_raise &= ~mask;
+
+ return max732x_gpio_direction_input(&chip->gpio_chip, off);
+}
+
+static struct irq_chip max732x_irq_chip = {
+ .name = "max732x",
+ .mask = max732x_irq_mask,
+ .unmask = max732x_irq_unmask,
+ .bus_lock = max732x_irq_bus_lock,
+ .bus_sync_unlock = max732x_irq_bus_sync_unlock,
+ .set_type = max732x_irq_set_type,
+};
+
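+/*
+ * Read the 16-bit status word and work out which enabled pins fired.
+ * Assumed layout, inferred from how the word is split below: the high
+ * byte flags the pins whose interrupt condition triggered and the low
+ * byte holds the current pin levels; only edges that match the
+ * configured rising/falling triggers are reported as pending.
+ */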
+static uint8_t max732x_irq_pending(struct max732x_chip *chip)
+{
+ uint8_t cur_stat;
+ uint8_t old_stat;
+ uint8_t trigger;
+ uint8_t pending;
+ uint16_t status;
+ int ret;
+
+ ret = max732x_readw(chip, &status);
+ if (ret)
+ return 0;
+
+ trigger = status >> 8;
+ trigger &= chip->irq_mask;
+
+ if (!trigger)
+ return 0;
+
+ cur_stat = status & 0xFF;
+ cur_stat &= chip->irq_mask;
+
+ old_stat = cur_stat ^ trigger;
+
+ pending = (old_stat & chip->irq_trig_fall) |
+ (cur_stat & chip->irq_trig_raise);
+ pending &= trigger;
+
+ return pending;
+}
+
+static irqreturn_t max732x_irq_handler(int irq, void *devid)
+{
+ struct max732x_chip *chip = devid;
+ uint8_t pending;
+ uint8_t level;
+
+ pending = max732x_irq_pending(chip);
+
+ if (!pending)
+ return IRQ_HANDLED;
+
+ do {
+ level = __ffs(pending);
+ handle_nested_irq(level + chip->irq_base);
+
+ pending &= ~(1 << level);
+ } while (pending);
+
+ return IRQ_HANDLED;
+}
+
+static int max732x_irq_setup(struct max732x_chip *chip,
+ const struct i2c_device_id *id)
+{
+ struct i2c_client *client = chip->client;
+ struct max732x_platform_data *pdata = client->dev.platform_data;
+ int has_irq = max732x_features[id->driver_data] >> 32;
+ int ret;
+
+ if (pdata->irq_base && has_irq != INT_NONE) {
+ int lvl;
+
+ chip->irq_base = pdata->irq_base;
+ chip->irq_features = has_irq;
+ mutex_init(&chip->irq_lock);
+
+ for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
+ int irq = lvl + chip->irq_base;
+
+ if (!(chip->dir_input & (1 << lvl)))
+ continue;
+
+ set_irq_chip_data(irq, chip);
+ set_irq_chip_and_handler(irq, &max732x_irq_chip,
+ handle_edge_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ ret = request_threaded_irq(client->irq,
+ NULL,
+ max732x_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(&client->dev), chip);
+ if (ret) {
+ dev_err(&client->dev, "failed to request irq %d\n",
+ client->irq);
+ goto out_failed;
+ }
+
+ chip->gpio_chip.to_irq = max732x_gpio_to_irq;
+ }
+
+ return 0;
+
+out_failed:
+ chip->irq_base = 0;
+ return ret;
+}
+
+static void max732x_irq_teardown(struct max732x_chip *chip)
+{
+ if (chip->irq_base)
+ free_irq(chip->client->irq, chip);
+}
+#else /* CONFIG_GPIO_MAX732X_IRQ */
+static int max732x_irq_setup(struct max732x_chip *chip,
+ const struct i2c_device_id *id)
+{
+ struct i2c_client *client = chip->client;
+ struct max732x_platform_data *pdata = client->dev.platform_data;
+ int has_irq = max732x_features[id->driver_data] >> 32;
+
+ if (pdata->irq_base && has_irq != INT_NONE)
+ dev_warn(&client->dev, "interrupt support not compiled in\n");
+
+ return 0;
+}
+
+static void max732x_irq_teardown(struct max732x_chip *chip)
+{
+}
+#endif
+
static int __devinit max732x_setup_gpio(struct max732x_chip *chip,
const struct i2c_device_id *id,
unsigned gpio_start)
{
struct gpio_chip *gc = &chip->gpio_chip;
- uint32_t id_data = id->driver_data;
+ uint32_t id_data = (uint32_t)max732x_features[id->driver_data];
int i, port = 0;
for (i = 0; i < 16; i++, id_data >>= 2) {
@@ -285,14 +602,14 @@ static int __devinit max732x_probe(struct i2c_client *client,
switch (client->addr & 0x70) {
case 0x60:
chip->client_group_a = client;
- if (nr_port > 7) {
+ if (nr_port > 8) {
c = i2c_new_dummy(client->adapter, addr_b);
chip->client_group_b = chip->client_dummy = c;
}
break;
case 0x50:
chip->client_group_b = client;
- if (nr_port > 7) {
+ if (nr_port > 8) {
c = i2c_new_dummy(client->adapter, addr_a);
chip->client_group_a = chip->client_dummy = c;
}
@@ -306,9 +623,13 @@ static int __devinit max732x_probe(struct i2c_client *client,
mutex_init(&chip->lock);
- max732x_read(chip, is_group_a(chip, 0), &chip->reg_out[0]);
- if (nr_port > 7)
- max732x_read(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+ max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
+ if (nr_port > 8)
+ max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+
+ ret = max732x_irq_setup(chip, id);
+ if (ret)
+ goto out_failed;
ret = gpiochip_add(&chip->gpio_chip);
if (ret)
@@ -325,6 +646,7 @@ static int __devinit max732x_probe(struct i2c_client *client,
return 0;
out_failed:
+ max732x_irq_teardown(chip);
kfree(chip);
return ret;
}
@@ -352,6 +674,8 @@ static int __devexit max732x_remove(struct i2c_client *client)
return ret;
}
+ max732x_irq_teardown(chip);
+
/* unregister any dummy i2c_client */
if (chip->client_dummy)
i2c_unregister_device(chip->client_dummy);
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index b827c976dc62..a2b12aa1f2b9 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -73,7 +73,7 @@ struct pca953x_chip {
struct i2c_client *client;
struct pca953x_platform_data *dyn_pdata;
struct gpio_chip gpio_chip;
- char **names;
+ const char *const *names;
};
static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
@@ -449,7 +449,7 @@ pca953x_get_alt_pdata(struct i2c_client *client)
struct device_node *node;
const uint16_t *val;
- node = dev_archdata_get_node(&client->dev.archdata);
+ node = client->dev.of_node;
if (node == NULL)
return NULL;
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 105701a1f05b..ee568c8fcbd0 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -164,7 +164,7 @@ static int pl061_irq_type(unsigned irq, unsigned trigger)
unsigned long flags;
u8 gpiois, gpioibe, gpioiev;
- if (offset < 0 || offset > PL061_GPIO_NR)
+ if (offset < 0 || offset >= PL061_GPIO_NR)
return -EINVAL;
spin_lock_irqsave(&chip->irq_lock, flags);
diff --git a/drivers/gpio/rdc321x-gpio.c b/drivers/gpio/rdc321x-gpio.c
new file mode 100644
index 000000000000..2762698e0204
--- /dev/null
+++ b/drivers/gpio/rdc321x-gpio.c
@@ -0,0 +1,246 @@
+/*
+ * RDC321x GPIO driver
+ *
+ * Copyright (C) 2008, Volker Weiss <dev@tintuc.de>
+ * Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/mfd/rdc321x.h>
+#include <linux/slab.h>
+
+struct rdc321x_gpio {
+ spinlock_t lock;
+ struct pci_dev *sb_pdev;
+ u32 data_reg[2];
+ int reg1_ctrl_base;
+ int reg1_data_base;
+ int reg2_ctrl_base;
+ int reg2_data_base;
+ struct gpio_chip chip;
+};
+
+/* read GPIO pin */
+static int rdc_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ struct rdc321x_gpio *gpch;
+ u32 value = 0;
+ int reg;
+
+ gpch = container_of(chip, struct rdc321x_gpio, chip);
+ reg = gpio < 32 ? gpch->reg1_data_base : gpch->reg2_data_base;
+
+ spin_lock(&gpch->lock);
+ pci_write_config_dword(gpch->sb_pdev, reg,
+ gpch->data_reg[gpio < 32 ? 0 : 1]);
+ pci_read_config_dword(gpch->sb_pdev, reg, &value);
+ spin_unlock(&gpch->lock);
+
+ return (1 << (gpio & 0x1f)) & value ? 1 : 0;
+}
+
+static void rdc_gpio_set_value_impl(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct rdc321x_gpio *gpch;
+ int reg = (gpio < 32) ? 0 : 1;
+
+ gpch = container_of(chip, struct rdc321x_gpio, chip);
+
+ if (value)
+ gpch->data_reg[reg] |= 1 << (gpio & 0x1f);
+ else
+ gpch->data_reg[reg] &= ~(1 << (gpio & 0x1f));
+
+ pci_write_config_dword(gpch->sb_pdev,
+ reg ? gpch->reg2_data_base : gpch->reg1_data_base,
+ gpch->data_reg[reg]);
+}
+
+/* set GPIO pin to value */
+static void rdc_gpio_set_value(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct rdc321x_gpio *gpch;
+
+ gpch = container_of(chip, struct rdc321x_gpio, chip);
+ spin_lock(&gpch->lock);
+ rdc_gpio_set_value_impl(chip, gpio, value);
+ spin_unlock(&gpch->lock);
+}
+
+static int rdc_gpio_config(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct rdc321x_gpio *gpch;
+ int err;
+ u32 reg;
+
+ gpch = container_of(chip, struct rdc321x_gpio, chip);
+
+ spin_lock(&gpch->lock);
+ err = pci_read_config_dword(gpch->sb_pdev, gpio < 32 ?
+ gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, &reg);
+ if (err)
+ goto unlock;
+
+ reg |= 1 << (gpio & 0x1f);
+
+ err = pci_write_config_dword(gpch->sb_pdev, gpio < 32 ?
+ gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, reg);
+ if (err)
+ goto unlock;
+
+ rdc_gpio_set_value_impl(chip, gpio, value);
+
+unlock:
+ spin_unlock(&gpch->lock);
+
+ return err;
+}
+
+/* configure GPIO pin as input */
+static int rdc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ return rdc_gpio_config(chip, gpio, 1);
+}
+
+/*
+ * Cache the initial value of both GPIO data registers
+ */
+static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
+{
+ int err;
+ struct resource *r;
+ struct rdc321x_gpio *rdc321x_gpio_dev;
+ struct rdc321x_gpio_pdata *pdata;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data supplied\n");
+ return -ENODEV;
+ }
+
+ rdc321x_gpio_dev = kzalloc(sizeof(struct rdc321x_gpio), GFP_KERNEL);
+ if (!rdc321x_gpio_dev) {
+ dev_err(&pdev->dev, "failed to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg1");
+ if (!r) {
+ dev_err(&pdev->dev, "failed to get gpio-reg1 resource\n");
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ spin_lock_init(&rdc321x_gpio_dev->lock);
+ rdc321x_gpio_dev->sb_pdev = pdata->sb_pdev;
+ rdc321x_gpio_dev->reg1_ctrl_base = r->start;
+ rdc321x_gpio_dev->reg1_data_base = r->start + 0x4;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg2");
+ if (!r) {
+ dev_err(&pdev->dev, "failed to get gpio-reg2 resource\n");
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ rdc321x_gpio_dev->reg2_ctrl_base = r->start;
+ rdc321x_gpio_dev->reg2_data_base = r->start + 0x4;
+
+ rdc321x_gpio_dev->chip.label = "rdc321x-gpio";
+ rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input;
+ rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config;
+ rdc321x_gpio_dev->chip.get = rdc_gpio_get_value;
+ rdc321x_gpio_dev->chip.set = rdc_gpio_set_value;
+ rdc321x_gpio_dev->chip.base = 0;
+ rdc321x_gpio_dev->chip.ngpio = pdata->max_gpios;
+
+ platform_set_drvdata(pdev, rdc321x_gpio_dev);
+
+ /* This might not be what others (BIOS, bootloader, etc.) wrote to
+ these registers before, but it's a good guess. Still better than
+ just using 0xffffffff. */
+ err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev,
+ rdc321x_gpio_dev->reg1_data_base,
+ &rdc321x_gpio_dev->data_reg[0]);
+ if (err)
+ goto out_drvdata;
+
+ err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev,
+ rdc321x_gpio_dev->reg2_data_base,
+ &rdc321x_gpio_dev->data_reg[1]);
+ if (err)
+ goto out_drvdata;
+
+ dev_info(&pdev->dev, "registering %d GPIOs\n",
+ rdc321x_gpio_dev->chip.ngpio);
+ return gpiochip_add(&rdc321x_gpio_dev->chip);
+
+out_drvdata:
+ platform_set_drvdata(pdev, NULL);
+out_free:
+ kfree(rdc321x_gpio_dev);
+ return err;
+}
+
+static int __devexit rdc321x_gpio_remove(struct platform_device *pdev)
+{
+ int ret;
+ struct rdc321x_gpio *rdc321x_gpio_dev = platform_get_drvdata(pdev);
+
+ ret = gpiochip_remove(&rdc321x_gpio_dev->chip);
+ if (ret)
+ dev_err(&pdev->dev, "failed to unregister chip\n");
+
+ kfree(rdc321x_gpio_dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static struct platform_driver rdc321x_gpio_driver = {
+ .driver.name = "rdc321x-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = rdc321x_gpio_probe,
+ .remove = __devexit_p(rdc321x_gpio_remove),
+};
+
+static int __init rdc321x_gpio_init(void)
+{
+ return platform_driver_register(&rdc321x_gpio_driver);
+}
+
+static void __exit rdc321x_gpio_exit(void)
+{
+ platform_driver_unregister(&rdc321x_gpio_driver);
+}
+
+module_init(rdc321x_gpio_init);
+module_exit(rdc321x_gpio_exit);
+
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_DESCRIPTION("RDC321x GPIO driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rdc321x-gpio");
diff --git a/drivers/gpio/tc35892-gpio.c b/drivers/gpio/tc35892-gpio.c
new file mode 100644
index 000000000000..1be6288780de
--- /dev/null
+++ b/drivers/gpio/tc35892-gpio.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/tc35892.h>
+
+/*
+ * These registers are modified under the irq bus lock and cached to avoid
+ * unnecessary writes in bus_sync_unlock.
+ */
+enum { REG_IBE, REG_IEV, REG_IS, REG_IE };
+
+#define CACHE_NR_REGS 4
+#define CACHE_NR_BANKS 3
+
+struct tc35892_gpio {
+ struct gpio_chip chip;
+ struct tc35892 *tc35892;
+ struct device *dev;
+ struct mutex irq_lock;
+
+ int irq_base;
+
+ /* Caches of interrupt control registers for bus_lock */
+ u8 regs[CACHE_NR_REGS][CACHE_NR_BANKS];
+ u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS];
+};
+
+static inline struct tc35892_gpio *to_tc35892_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct tc35892_gpio, chip);
+}
+
+static int tc35892_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 reg = TC35892_GPIODATA0 + (offset / 8) * 2;
+ u8 mask = 1 << (offset % 8);
+ int ret;
+
+ ret = tc35892_reg_read(tc35892, reg);
+ if (ret < 0)
+ return ret;
+
+ return ret & mask;
+}
+
+static void tc35892_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 reg = TC35892_GPIODATA0 + (offset / 8) * 2;
+ unsigned pos = offset % 8;
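+ /* The register after GPIODATAx is assumed to act as a write mask, so
+ * the paired block write below only updates bit 'pos'. */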
+ u8 data[] = {!!val << pos, 1 << pos};
+
+ tc35892_block_write(tc35892, reg, ARRAY_SIZE(data), data);
+}
+
+static int tc35892_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int val)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 reg = TC35892_GPIODIR0 + offset / 8;
+ unsigned pos = offset % 8;
+
+ tc35892_gpio_set(chip, offset, val);
+
+ return tc35892_set_bits(tc35892, reg, 1 << pos, 1 << pos);
+}
+
+static int tc35892_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 reg = TC35892_GPIODIR0 + offset / 8;
+ unsigned pos = offset % 8;
+
+ return tc35892_set_bits(tc35892, reg, 1 << pos, 0);
+}
+
+static int tc35892_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+
+ return tc35892_gpio->irq_base + offset;
+}
+
+static struct gpio_chip template_chip = {
+ .label = "tc35892",
+ .owner = THIS_MODULE,
+ .direction_input = tc35892_gpio_direction_input,
+ .get = tc35892_gpio_get,
+ .direction_output = tc35892_gpio_direction_output,
+ .set = tc35892_gpio_set,
+ .to_irq = tc35892_gpio_to_irq,
+ .can_sleep = 1,
+};
+
+static int tc35892_gpio_irq_set_type(unsigned int irq, unsigned int type)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+ int offset = irq - tc35892_gpio->irq_base;
+ int regoffset = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ if (type == IRQ_TYPE_EDGE_BOTH) {
+ tc35892_gpio->regs[REG_IBE][regoffset] |= mask;
+ return 0;
+ }
+
+ tc35892_gpio->regs[REG_IBE][regoffset] &= ~mask;
+
+ if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
+ tc35892_gpio->regs[REG_IS][regoffset] |= mask;
+ else
+ tc35892_gpio->regs[REG_IS][regoffset] &= ~mask;
+
+ if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH)
+ tc35892_gpio->regs[REG_IEV][regoffset] |= mask;
+ else
+ tc35892_gpio->regs[REG_IEV][regoffset] &= ~mask;
+
+ return 0;
+}
+
+static void tc35892_gpio_irq_lock(unsigned int irq)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+
+ mutex_lock(&tc35892_gpio->irq_lock);
+}
+
+static void tc35892_gpio_irq_sync_unlock(unsigned int irq)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ static const u8 regmap[] = {
+ [REG_IBE] = TC35892_GPIOIBE0,
+ [REG_IEV] = TC35892_GPIOIEV0,
+ [REG_IS] = TC35892_GPIOIS0,
+ [REG_IE] = TC35892_GPIOIE0,
+ };
+ int i, j;
+
+ for (i = 0; i < CACHE_NR_REGS; i++) {
+ for (j = 0; j < CACHE_NR_BANKS; j++) {
+ u8 old = tc35892_gpio->oldregs[i][j];
+ u8 new = tc35892_gpio->regs[i][j];
+
+ if (new == old)
+ continue;
+
+ tc35892_gpio->oldregs[i][j] = new;
+ tc35892_reg_write(tc35892, regmap[i] + j * 8, new);
+ }
+ }
+
+ mutex_unlock(&tc35892_gpio->irq_lock);
+}
+
+static void tc35892_gpio_irq_mask(unsigned int irq)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+ int offset = irq - tc35892_gpio->irq_base;
+ int regoffset = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ tc35892_gpio->regs[REG_IE][regoffset] &= ~mask;
+}
+
+static void tc35892_gpio_irq_unmask(unsigned int irq)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+ int offset = irq - tc35892_gpio->irq_base;
+ int regoffset = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ tc35892_gpio->regs[REG_IE][regoffset] |= mask;
+}
+
+static struct irq_chip tc35892_gpio_irq_chip = {
+ .name = "tc35892-gpio",
+ .bus_lock = tc35892_gpio_irq_lock,
+ .bus_sync_unlock = tc35892_gpio_irq_sync_unlock,
+ .mask = tc35892_gpio_irq_mask,
+ .unmask = tc35892_gpio_irq_unmask,
+ .set_type = tc35892_gpio_irq_set_type,
+};
+
+static irqreturn_t tc35892_gpio_irq(int irq, void *dev)
+{
+ struct tc35892_gpio *tc35892_gpio = dev;
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 status[CACHE_NR_BANKS];
+ int ret;
+ int i;
+
+ ret = tc35892_block_read(tc35892, TC35892_GPIOMIS0,
+ ARRAY_SIZE(status), status);
+ if (ret < 0)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(status); i++) {
+ unsigned int stat = status[i];
+ if (!stat)
+ continue;
+
+ while (stat) {
+ int bit = __ffs(stat);
+ int line = i * 8 + bit;
+
+ handle_nested_irq(tc35892_gpio->irq_base + line);
+ stat &= ~(1 << bit);
+ }
+
+ tc35892_reg_write(tc35892, TC35892_GPIOIC0 + i, status[i]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tc35892_gpio_irq_init(struct tc35892_gpio *tc35892_gpio)
+{
+ int base = tc35892_gpio->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + tc35892_gpio->chip.ngpio; irq++) {
+ set_irq_chip_data(irq, tc35892_gpio);
+ set_irq_chip_and_handler(irq, &tc35892_gpio_irq_chip,
+ handle_simple_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ return 0;
+}
+
+static void tc35892_gpio_irq_remove(struct tc35892_gpio *tc35892_gpio)
+{
+ int base = tc35892_gpio->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + tc35892_gpio->chip.ngpio; irq++) {
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, 0);
+#endif
+ set_irq_chip_and_handler(irq, NULL, NULL);
+ set_irq_chip_data(irq, NULL);
+ }
+}
+
+static int __devinit tc35892_gpio_probe(struct platform_device *pdev)
+{
+ struct tc35892 *tc35892 = dev_get_drvdata(pdev->dev.parent);
+ struct tc35892_gpio_platform_data *pdata;
+ struct tc35892_gpio *tc35892_gpio;
+ int ret;
+ int irq;
+
+ pdata = tc35892->pdata->gpio;
+ if (!pdata)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ tc35892_gpio = kzalloc(sizeof(struct tc35892_gpio), GFP_KERNEL);
+ if (!tc35892_gpio)
+ return -ENOMEM;
+
+ mutex_init(&tc35892_gpio->irq_lock);
+
+ tc35892_gpio->dev = &pdev->dev;
+ tc35892_gpio->tc35892 = tc35892;
+
+ tc35892_gpio->chip = template_chip;
+ tc35892_gpio->chip.ngpio = tc35892->num_gpio;
+ tc35892_gpio->chip.dev = &pdev->dev;
+ tc35892_gpio->chip.base = pdata->gpio_base;
+
+ tc35892_gpio->irq_base = tc35892->irq_base + TC35892_INT_GPIO(0);
+
+ /* Bring the GPIO module out of reset */
+ ret = tc35892_set_bits(tc35892, TC35892_RSTCTRL,
+ TC35892_RSTCTRL_GPIRST, 0);
+ if (ret < 0)
+ goto out_free;
+
+ ret = tc35892_gpio_irq_init(tc35892_gpio);
+ if (ret)
+ goto out_free;
+
+ ret = request_threaded_irq(irq, NULL, tc35892_gpio_irq, IRQF_ONESHOT,
+ "tc35892-gpio", tc35892_gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
+ goto out_removeirq;
+ }
+
+ ret = gpiochip_add(&tc35892_gpio->chip);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret);
+ goto out_freeirq;
+ }
+
+ platform_set_drvdata(pdev, tc35892_gpio);
+
+ return 0;
+
+out_freeirq:
+ free_irq(irq, tc35892_gpio);
+out_removeirq:
+ tc35892_gpio_irq_remove(tc35892_gpio);
+out_free:
+ kfree(tc35892_gpio);
+ return ret;
+}
+
+static int __devexit tc35892_gpio_remove(struct platform_device *pdev)
+{
+ struct tc35892_gpio *tc35892_gpio = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+ int ret;
+
+ ret = gpiochip_remove(&tc35892_gpio->chip);
+ if (ret < 0) {
+ dev_err(tc35892_gpio->dev,
+ "unable to remove gpiochip: %d\n", ret);
+ return ret;
+ }
+
+ free_irq(irq, tc35892_gpio);
+ tc35892_gpio_irq_remove(tc35892_gpio);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(tc35892_gpio);
+
+ return 0;
+}
+
+static struct platform_driver tc35892_gpio_driver = {
+ .driver.name = "tc35892-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = tc35892_gpio_probe,
+ .remove = __devexit_p(tc35892_gpio_remove),
+};
+
+static int __init tc35892_gpio_init(void)
+{
+ return platform_driver_register(&tc35892_gpio_driver);
+}
+subsys_initcall(tc35892_gpio_init);
+
+static void __exit tc35892_gpio_exit(void)
+{
+ platform_driver_unregister(&tc35892_gpio_driver);
+}
+module_exit(tc35892_gpio_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TC35892 GPIO driver");
+MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 994d23beeb1d..57cea01c4ffb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1840,8 +1840,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
ret = copy_from_user(clips, clips_ptr,
num_clips * sizeof(*clips));
- if (ret)
+ if (ret) {
+ ret = -EFAULT;
goto out_err2;
+ }
}
if (fb->funcs->dirty) {
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 764401951041..9b2a54117c91 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -860,19 +860,24 @@ static void output_poll_execute(struct slow_work *work)
}
}
-void drm_kms_helper_poll_init(struct drm_device *dev)
+void drm_kms_helper_poll_disable(struct drm_device *dev)
+{
+ if (!dev->mode_config.poll_enabled)
+ return;
+ delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_disable);
+
+void drm_kms_helper_poll_enable(struct drm_device *dev)
{
- struct drm_connector *connector;
bool poll = false;
+ struct drm_connector *connector;
int ret;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->polled)
poll = true;
}
- slow_work_register_user(THIS_MODULE);
- delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
- &output_poll_ops);
if (poll) {
ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
@@ -880,11 +885,22 @@ void drm_kms_helper_poll_init(struct drm_device *dev)
DRM_ERROR("delayed enqueue failed %d\n", ret);
}
}
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+
+void drm_kms_helper_poll_init(struct drm_device *dev)
+{
+ slow_work_register_user(THIS_MODULE);
+ delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
+ &output_poll_ops);
+ dev->mode_config.poll_enabled = true;
+
+ drm_kms_helper_poll_enable(dev);
+}
EXPORT_SYMBOL(drm_kms_helper_poll_init);
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
- delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+ drm_kms_helper_poll_disable(dev);
slow_work_unregister_user(THIS_MODULE);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f569ae88ab38..c1981861bbbd 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -147,7 +147,10 @@ drm_edid_block_valid(u8 *raw_edid)
csum += raw_edid[i];
if (csum) {
DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
- goto bad;
+
+ /* allow CEA to slide through, switches mangle this */
+ if (raw_edid[0] != 0x02)
+ goto bad;
}
/* per-block-type checks */
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index b3779d243aef..08c4c926e65f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -264,7 +264,7 @@ bool drm_fb_helper_force_kernel_mode(void)
int drm_fb_helper_panic(struct notifier_block *n, unsigned long unused,
void *panic_str)
{
- DRM_ERROR("panic occurred, switching back to text console\n");
+ printk(KERN_ERR "panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
return 0;
}
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 95639017bdbe..da78f2c0d909 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -22,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_fb.o \
intel_tv.o \
intel_dvo.o \
+ intel_ringbuffer.o \
intel_overlay.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 322070c0c631..52510ad8b25d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -77,7 +77,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
case ACTIVE_LIST:
seq_printf(m, "Active:\n");
lock = &dev_priv->mm.active_list_lock;
- head = &dev_priv->mm.active_list;
+ head = &dev_priv->render_ring.active_list;
break;
case INACTIVE_LIST:
seq_printf(m, "Inactive:\n");
@@ -129,7 +129,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_i915_gem_request *gem_request;
seq_printf(m, "Request:\n");
- list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
+ list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
+ list) {
seq_printf(m, " %d @ %d\n",
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies));
@@ -143,9 +144,9 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- if (dev_priv->hw_status_page != NULL) {
+ if (dev_priv->render_ring.status_page.page_addr != NULL) {
seq_printf(m, "Current sequence: %d\n",
- i915_get_gem_seqno(dev));
+ i915_get_gem_seqno(dev, &dev_priv->render_ring));
} else {
seq_printf(m, "Current sequence: hws uninitialized\n");
}
@@ -195,9 +196,9 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
- if (dev_priv->hw_status_page != NULL) {
+ if (dev_priv->render_ring.status_page.page_addr != NULL) {
seq_printf(m, "Current sequence: %d\n",
- i915_get_gem_seqno(dev));
+ i915_get_gem_seqno(dev, &dev_priv->render_ring));
} else {
seq_printf(m, "Current sequence: hws uninitialized\n");
}
@@ -251,7 +252,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
int i;
volatile u32 *hws;
- hws = (volatile u32 *)dev_priv->hw_status_page;
+ hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
if (hws == NULL)
return 0;
@@ -287,7 +288,8 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
spin_lock(&dev_priv->mm.active_list_lock);
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+ list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
+ list) {
obj = &obj_priv->base;
if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
ret = i915_gem_object_get_pages(obj, 0);
@@ -317,14 +319,14 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
u8 *virt;
uint32_t *ptr, off;
- if (!dev_priv->ring.ring_obj) {
+ if (!dev_priv->render_ring.gem_object) {
seq_printf(m, "No ringbuffer setup\n");
return 0;
}
- virt = dev_priv->ring.virtual_start;
+ virt = dev_priv->render_ring.virtual_start;
- for (off = 0; off < dev_priv->ring.Size; off += 4) {
+ for (off = 0; off < dev_priv->render_ring.size; off += 4) {
ptr = (uint32_t *)(virt + off);
seq_printf(m, "%08x : %08x\n", off, *ptr);
}
@@ -344,7 +346,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
seq_printf(m, "RingHead : %08x\n", head);
seq_printf(m, "RingTail : %08x\n", tail);
- seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
+ seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
return 0;
@@ -489,11 +491,14 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u16 rgvswctl = I915_READ16(MEMSWCTL);
+ u16 rgvstat = I915_READ16(MEMSTAT_ILK);
- seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3);
- seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1);
- seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf,
- rgvswctl & 0x3f);
+ seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+ seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+ seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+ MEMSTAT_VID_SHIFT);
+ seq_printf(m, "Current P-state: %d\n",
+ (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
return 0;
}
@@ -508,7 +513,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
for (i = 0; i < 16; i++) {
delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
- seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq);
+ seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
+ (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
}
return 0;
@@ -541,6 +547,8 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
+ u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
+ u16 crstandvid = I915_READ16(CRSTANDVID);
seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
"yes" : "no");
@@ -555,9 +563,13 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
seq_printf(m, "Starting frequency: P%d\n",
(rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
- seq_printf(m, "Max frequency: P%d\n",
+ seq_printf(m, "Max P-state: P%d\n",
(rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
- seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+ seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+ seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+ seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+ seq_printf(m, "Render standby enabled: %s\n",
+ (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
return 0;
}
@@ -621,6 +633,36 @@ static int i915_sr_status(struct seq_file *m, void *unused)
return 0;
}
+static int i915_emon_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long temp, chipset, gfx;
+
+ temp = i915_mch_val(dev_priv);
+ chipset = i915_chipset_val(dev_priv);
+ gfx = i915_gfx_val(dev_priv);
+
+ seq_printf(m, "GMCH temp: %ld\n", temp);
+ seq_printf(m, "Chipset power: %ld\n", chipset);
+ seq_printf(m, "GFX power: %ld\n", gfx);
+ seq_printf(m, "Total power: %ld\n", chipset + gfx);
+
+ return 0;
+}
+
+static int i915_gfxec(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+
+ return 0;
+}
+
static int
i915_wedged_open(struct inode *inode,
struct file *filp)
@@ -743,6 +785,8 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_delayfreq_table", i915_delayfreq_table, 0},
{"i915_inttoext_table", i915_inttoext_table, 0},
{"i915_drpc_info", i915_drpc_info, 0},
+ {"i915_emon_status", i915_emon_status, 0},
+ {"i915_gfxec", i915_gfxec, 0},
{"i915_fbc_status", i915_fbc_status, 0},
{"i915_sr_status", i915_sr_status, 0},
};
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2a6b5de5ae5d..59a2bf8592ec 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -40,84 +40,6 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
-/* Really want an OS-independent resettable timer. Would like to have
- * this loop run for (eg) 3 sec, but have the timer reset every time
- * the head pointer changes, so that EBUSY only happens if the ring
- * actually stalls for (eg) 3 seconds.
- */
-int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
- u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
- u32 last_acthd = I915_READ(acthd_reg);
- u32 acthd;
- u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- int i;
-
- trace_i915_ring_wait_begin (dev);
-
- for (i = 0; i < 100000; i++) {
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- acthd = I915_READ(acthd_reg);
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
- if (ring->space >= n) {
- trace_i915_ring_wait_end (dev);
- return 0;
- }
-
- if (dev->primary->master) {
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- }
-
-
- if (ring->head != last_head)
- i = 0;
- if (acthd != last_acthd)
- i = 0;
-
- last_head = ring->head;
- last_acthd = acthd;
- msleep_interruptible(10);
-
- }
-
- trace_i915_ring_wait_end (dev);
- return -EBUSY;
-}
-
-/* As a ringbuffer is only allowed to wrap between instructions, fill
- * the tail with NOOPs.
- */
-int i915_wrap_ring(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- volatile unsigned int *virt;
- int rem;
-
- rem = dev_priv->ring.Size - dev_priv->ring.tail;
- if (dev_priv->ring.space < rem) {
- int ret = i915_wait_ring(dev, rem, __func__);
- if (ret)
- return ret;
- }
- dev_priv->ring.space -= rem;
-
- virt = (unsigned int *)
- (dev_priv->ring.virtual_start + dev_priv->ring.tail);
- rem /= 4;
- while (rem--)
- *virt++ = MI_NOOP;
-
- dev_priv->ring.tail = 0;
-
- return 0;
-}
-
/**
* Sets up the hardware status page for devices that need a physical address
* in the register.
@@ -133,10 +55,11 @@ static int i915_init_phys_hws(struct drm_device *dev)
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
- dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+ dev_priv->render_ring.status_page.page_addr
+ = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
if (IS_I965G(dev))
dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -159,8 +82,8 @@ static void i915_free_hws(struct drm_device *dev)
dev_priv->status_page_dmah = NULL;
}
- if (dev_priv->status_gfx_addr) {
- dev_priv->status_gfx_addr = 0;
+ if (dev_priv->render_ring.status_page.gfx_addr) {
+ dev_priv->render_ring.status_page.gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
}
@@ -172,7 +95,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+ struct intel_ring_buffer *ring = &dev_priv->render_ring;
/*
* We should never lose context on the ring with modesetting
@@ -185,7 +108,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
- ring->space += ring->Size;
+ ring->space += ring->size;
if (!dev->primary->master)
return;
@@ -205,12 +128,9 @@ static int i915_dma_cleanup(struct drm_device * dev)
if (dev->irq_enabled)
drm_irq_uninstall(dev);
- if (dev_priv->ring.virtual_start) {
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
- dev_priv->ring.virtual_start = NULL;
- dev_priv->ring.map.handle = NULL;
- dev_priv->ring.map.size = 0;
- }
+ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+ if (HAS_BSD(dev))
+ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
/* Clear the HWS virtual address at teardown */
if (I915_NEED_GFX_HWS(dev))
@@ -233,24 +153,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
if (init->ring_size != 0) {
- if (dev_priv->ring.ring_obj != NULL) {
+ if (dev_priv->render_ring.gem_object != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
return -EINVAL;
}
- dev_priv->ring.Size = init->ring_size;
+ dev_priv->render_ring.size = init->ring_size;
- dev_priv->ring.map.offset = init->ring_start;
- dev_priv->ring.map.size = init->ring_size;
- dev_priv->ring.map.type = 0;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
+ dev_priv->render_ring.map.offset = init->ring_start;
+ dev_priv->render_ring.map.size = init->ring_size;
+ dev_priv->render_ring.map.type = 0;
+ dev_priv->render_ring.map.flags = 0;
+ dev_priv->render_ring.map.mtrr = 0;
- drm_core_ioremap_wc(&dev_priv->ring.map, dev);
+ drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
- if (dev_priv->ring.map.handle == NULL) {
+ if (dev_priv->render_ring.map.handle == NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -258,7 +178,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
}
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+ dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
dev_priv->cpp = init->cpp;
dev_priv->back_offset = init->back_offset;
@@ -278,26 +198,29 @@ static int i915_dma_resume(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct intel_ring_buffer *ring;
DRM_DEBUG_DRIVER("%s\n", __func__);
- if (dev_priv->ring.map.handle == NULL) {
+ ring = &dev_priv->render_ring;
+
+ if (ring->map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
/* Program Hardware Status Page */
- if (!dev_priv->hw_status_page) {
+ if (!ring->status_page.page_addr) {
DRM_ERROR("Can not find hardware status page\n");
return -EINVAL;
}
DRM_DEBUG_DRIVER("hw status page @ %p\n",
- dev_priv->hw_status_page);
-
- if (dev_priv->status_gfx_addr != 0)
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ ring->status_page.page_addr);
+ if (ring->status_page.gfx_addr != 0)
+ ring->setup_status_page(dev, ring);
else
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
@@ -407,9 +330,8 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
- RING_LOCALS;
- if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
+ if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
return -EINVAL;
BEGIN_LP_RING((dwords+1)&~1);
@@ -442,9 +364,7 @@ i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *boxes,
int i, int DR1, int DR4)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_clip_rect box = boxes[i];
- RING_LOCALS;
if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
@@ -481,7 +401,6 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- RING_LOCALS;
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -535,10 +454,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch,
struct drm_clip_rect *cliprects)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
int nbox = batch->num_cliprects;
int i = 0, count;
- RING_LOCALS;
if ((batch->start | batch->used) & 0x7) {
DRM_ERROR("alignment");
@@ -587,7 +504,6 @@ static int i915_dispatch_flip(struct drm_device * dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
dev->primary->master->driver_priv;
- RING_LOCALS;
if (!master_priv->sarea_priv)
return -EINVAL;
@@ -640,7 +556,8 @@ static int i915_quiescent(struct drm_device * dev)
drm_i915_private_t *dev_priv = dev->dev_private;
i915_kernel_lost_context(dev);
- return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
+ return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
+ dev_priv->render_ring.size - 8);
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -827,6 +744,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
/* depends on GEM */
value = dev_priv->has_gem;
break;
+ case I915_PARAM_HAS_BSD:
+ value = HAS_BSD(dev);
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -882,6 +802,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
+ struct intel_ring_buffer *ring = &dev_priv->render_ring;
if (!I915_NEED_GFX_HWS(dev))
return -EINVAL;
@@ -898,7 +819,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
- dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
+ ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->hws_map.offset = dev->agp->base + hws->addr;
dev_priv->hws_map.size = 4*1024;
@@ -909,19 +830,19 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
drm_core_ioremap_wc(&dev_priv->hws_map, dev);
if (dev_priv->hws_map.handle == NULL) {
i915_dma_cleanup(dev);
- dev_priv->status_gfx_addr = 0;
+ ring->status_page.gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
" G33 hw status page\n");
return -ENOMEM;
}
- dev_priv->hw_status_page = dev_priv->hws_map.handle;
+ ring->status_page.page_addr = dev_priv->hws_map.handle;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
- dev_priv->status_gfx_addr);
+ ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws at %p\n",
- dev_priv->hw_status_page);
+ ring->status_page.page_addr);
return 0;
}
@@ -1399,12 +1320,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
- printk(KERN_INFO "i915: switched off\n");
+ printk(KERN_INFO "i915: switched on\n");
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
i915_resume(dev);
+ drm_kms_helper_poll_enable(dev);
} else {
printk(KERN_ERR "i915: switched off\n");
+ drm_kms_helper_poll_disable(dev);
i915_suspend(dev, pmm);
}
}
@@ -1479,19 +1402,19 @@ static int i915_load_modeset_init(struct drm_device *dev,
/* if we have > 1 VGA cards, then disable the radeon VGA resources */
ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
if (ret)
- goto destroy_ringbuffer;
+ goto cleanup_ringbuffer;
ret = vga_switcheroo_register_client(dev->pdev,
i915_switcheroo_set_state,
i915_switcheroo_can_switch);
if (ret)
- goto destroy_ringbuffer;
+ goto cleanup_vga_client;
intel_modeset_init(dev);
ret = drm_irq_install(dev);
if (ret)
- goto destroy_ringbuffer;
+ goto cleanup_vga_switcheroo;
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
@@ -1503,11 +1426,20 @@ static int i915_load_modeset_init(struct drm_device *dev,
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
- intel_fbdev_init(dev);
+ ret = intel_fbdev_init(dev);
+ if (ret)
+ goto cleanup_irq;
+
drm_kms_helper_poll_init(dev);
return 0;
-destroy_ringbuffer:
+cleanup_irq:
+ drm_irq_uninstall(dev);
+cleanup_vga_switcheroo:
+ vga_switcheroo_unregister_client(dev->pdev);
+cleanup_vga_client:
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+cleanup_ringbuffer:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
@@ -1539,14 +1471,11 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
master->driver_priv = NULL;
}
-static void i915_get_mem_freq(struct drm_device *dev)
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 tmp;
- if (!IS_PINEVIEW(dev))
- return;
-
tmp = I915_READ(CLKCFG);
switch (tmp & CLKCFG_FSB_MASK) {
@@ -1575,7 +1504,524 @@ static void i915_get_mem_freq(struct drm_device *dev)
dev_priv->mem_freq = 800;
break;
}
+
+ /* detect pineview DDR3 setting */
+ tmp = I915_READ(CSHRDDR3CTL);
+ dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 ddrpll, csipll;
+
+ ddrpll = I915_READ16(DDRMPLL1);
+ csipll = I915_READ16(CSIPLL0);
+
+ switch (ddrpll & 0xff) {
+ case 0xc:
+ dev_priv->mem_freq = 800;
+ break;
+ case 0x10:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 0x14:
+ dev_priv->mem_freq = 1333;
+ break;
+ case 0x18:
+ dev_priv->mem_freq = 1600;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
+ dev_priv->mem_freq = 0;
+ break;
+ }
+
+ dev_priv->r_t = dev_priv->mem_freq;
+
+ switch (csipll & 0x3ff) {
+ case 0x00c:
+ dev_priv->fsb_freq = 3200;
+ break;
+ case 0x00e:
+ dev_priv->fsb_freq = 3733;
+ break;
+ case 0x010:
+ dev_priv->fsb_freq = 4266;
+ break;
+ case 0x012:
+ dev_priv->fsb_freq = 4800;
+ break;
+ case 0x014:
+ dev_priv->fsb_freq = 5333;
+ break;
+ case 0x016:
+ dev_priv->fsb_freq = 5866;
+ break;
+ case 0x018:
+ dev_priv->fsb_freq = 6400;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
+ dev_priv->fsb_freq = 0;
+ break;
+ }
+
+ if (dev_priv->fsb_freq == 3200) {
+ dev_priv->c_m = 0;
+ } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+ dev_priv->c_m = 1;
+ } else {
+ dev_priv->c_m = 2;
+ }
+}
+
+struct v_table {
+ u8 vid;
+ unsigned long vd; /* in .1 mil */
+ unsigned long vm; /* in .1 mil */
+ u8 pvid;
+};
+
+static struct v_table v_table[] = {
+ { 0, 16125, 15000, 0x7f, },
+ { 1, 16000, 14875, 0x7e, },
+ { 2, 15875, 14750, 0x7d, },
+ { 3, 15750, 14625, 0x7c, },
+ { 4, 15625, 14500, 0x7b, },
+ { 5, 15500, 14375, 0x7a, },
+ { 6, 15375, 14250, 0x79, },
+ { 7, 15250, 14125, 0x78, },
+ { 8, 15125, 14000, 0x77, },
+ { 9, 15000, 13875, 0x76, },
+ { 10, 14875, 13750, 0x75, },
+ { 11, 14750, 13625, 0x74, },
+ { 12, 14625, 13500, 0x73, },
+ { 13, 14500, 13375, 0x72, },
+ { 14, 14375, 13250, 0x71, },
+ { 15, 14250, 13125, 0x70, },
+ { 16, 14125, 13000, 0x6f, },
+ { 17, 14000, 12875, 0x6e, },
+ { 18, 13875, 12750, 0x6d, },
+ { 19, 13750, 12625, 0x6c, },
+ { 20, 13625, 12500, 0x6b, },
+ { 21, 13500, 12375, 0x6a, },
+ { 22, 13375, 12250, 0x69, },
+ { 23, 13250, 12125, 0x68, },
+ { 24, 13125, 12000, 0x67, },
+ { 25, 13000, 11875, 0x66, },
+ { 26, 12875, 11750, 0x65, },
+ { 27, 12750, 11625, 0x64, },
+ { 28, 12625, 11500, 0x63, },
+ { 29, 12500, 11375, 0x62, },
+ { 30, 12375, 11250, 0x61, },
+ { 31, 12250, 11125, 0x60, },
+ { 32, 12125, 11000, 0x5f, },
+ { 33, 12000, 10875, 0x5e, },
+ { 34, 11875, 10750, 0x5d, },
+ { 35, 11750, 10625, 0x5c, },
+ { 36, 11625, 10500, 0x5b, },
+ { 37, 11500, 10375, 0x5a, },
+ { 38, 11375, 10250, 0x59, },
+ { 39, 11250, 10125, 0x58, },
+ { 40, 11125, 10000, 0x57, },
+ { 41, 11000, 9875, 0x56, },
+ { 42, 10875, 9750, 0x55, },
+ { 43, 10750, 9625, 0x54, },
+ { 44, 10625, 9500, 0x53, },
+ { 45, 10500, 9375, 0x52, },
+ { 46, 10375, 9250, 0x51, },
+ { 47, 10250, 9125, 0x50, },
+ { 48, 10125, 9000, 0x4f, },
+ { 49, 10000, 8875, 0x4e, },
+ { 50, 9875, 8750, 0x4d, },
+ { 51, 9750, 8625, 0x4c, },
+ { 52, 9625, 8500, 0x4b, },
+ { 53, 9500, 8375, 0x4a, },
+ { 54, 9375, 8250, 0x49, },
+ { 55, 9250, 8125, 0x48, },
+ { 56, 9125, 8000, 0x47, },
+ { 57, 9000, 7875, 0x46, },
+ { 58, 8875, 7750, 0x45, },
+ { 59, 8750, 7625, 0x44, },
+ { 60, 8625, 7500, 0x43, },
+ { 61, 8500, 7375, 0x42, },
+ { 62, 8375, 7250, 0x41, },
+ { 63, 8250, 7125, 0x40, },
+ { 64, 8125, 7000, 0x3f, },
+ { 65, 8000, 6875, 0x3e, },
+ { 66, 7875, 6750, 0x3d, },
+ { 67, 7750, 6625, 0x3c, },
+ { 68, 7625, 6500, 0x3b, },
+ { 69, 7500, 6375, 0x3a, },
+ { 70, 7375, 6250, 0x39, },
+ { 71, 7250, 6125, 0x38, },
+ { 72, 7125, 6000, 0x37, },
+ { 73, 7000, 5875, 0x36, },
+ { 74, 6875, 5750, 0x35, },
+ { 75, 6750, 5625, 0x34, },
+ { 76, 6625, 5500, 0x33, },
+ { 77, 6500, 5375, 0x32, },
+ { 78, 6375, 5250, 0x31, },
+ { 79, 6250, 5125, 0x30, },
+ { 80, 6125, 5000, 0x2f, },
+ { 81, 6000, 4875, 0x2e, },
+ { 82, 5875, 4750, 0x2d, },
+ { 83, 5750, 4625, 0x2c, },
+ { 84, 5625, 4500, 0x2b, },
+ { 85, 5500, 4375, 0x2a, },
+ { 86, 5375, 4250, 0x29, },
+ { 87, 5250, 4125, 0x28, },
+ { 88, 5125, 4000, 0x27, },
+ { 89, 5000, 3875, 0x26, },
+ { 90, 4875, 3750, 0x25, },
+ { 91, 4750, 3625, 0x24, },
+ { 92, 4625, 3500, 0x23, },
+ { 93, 4500, 3375, 0x22, },
+ { 94, 4375, 3250, 0x21, },
+ { 95, 4250, 3125, 0x20, },
+ { 96, 4125, 3000, 0x1f, },
+ { 97, 4125, 3000, 0x1e, },
+ { 98, 4125, 3000, 0x1d, },
+ { 99, 4125, 3000, 0x1c, },
+ { 100, 4125, 3000, 0x1b, },
+ { 101, 4125, 3000, 0x1a, },
+ { 102, 4125, 3000, 0x19, },
+ { 103, 4125, 3000, 0x18, },
+ { 104, 4125, 3000, 0x17, },
+ { 105, 4125, 3000, 0x16, },
+ { 106, 4125, 3000, 0x15, },
+ { 107, 4125, 3000, 0x14, },
+ { 108, 4125, 3000, 0x13, },
+ { 109, 4125, 3000, 0x12, },
+ { 110, 4125, 3000, 0x11, },
+ { 111, 4125, 3000, 0x10, },
+ { 112, 4125, 3000, 0x0f, },
+ { 113, 4125, 3000, 0x0e, },
+ { 114, 4125, 3000, 0x0d, },
+ { 115, 4125, 3000, 0x0c, },
+ { 116, 4125, 3000, 0x0b, },
+ { 117, 4125, 3000, 0x0a, },
+ { 118, 4125, 3000, 0x09, },
+ { 119, 4125, 3000, 0x08, },
+ { 120, 1125, 0, 0x07, },
+ { 121, 1000, 0, 0x06, },
+ { 122, 875, 0, 0x05, },
+ { 123, 750, 0, 0x04, },
+ { 124, 625, 0, 0x03, },
+ { 125, 500, 0, 0x02, },
+ { 126, 375, 0, 0x01, },
+ { 127, 0, 0, 0x00, },
+};
+
+struct cparams {
+ int i;
+ int t;
+ int m;
+ int c;
+};
+
+static struct cparams cparams[] = {
+ { 1, 1333, 301, 28664 },
+ { 1, 1066, 294, 24460 },
+ { 1, 800, 294, 25192 },
+ { 0, 1333, 276, 27605 },
+ { 0, 1066, 276, 27605 },
+ { 0, 800, 231, 23784 },
+};
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+ u64 total_count, diff, ret;
+ u32 count1, count2, count3, m = 0, c = 0;
+ unsigned long now = jiffies_to_msecs(jiffies), diff1;
+ int i;
+
+ diff1 = now - dev_priv->last_time1;
+
+ count1 = I915_READ(DMIEC);
+ count2 = I915_READ(DDREC);
+ count3 = I915_READ(CSIEC);
+
+ total_count = count1 + count2 + count3;
+
+ /* FIXME: handle per-counter overflow */
+ if (total_count < dev_priv->last_count1) {
+ diff = ~0UL - dev_priv->last_count1;
+ diff += total_count;
+ } else {
+ diff = total_count - dev_priv->last_count1;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+ if (cparams[i].i == dev_priv->c_m &&
+ cparams[i].t == dev_priv->r_t) {
+ m = cparams[i].m;
+ c = cparams[i].c;
+ break;
+ }
+ }
+
+ diff = div_u64(diff, diff1);
+ ret = ((m * diff) + c);
+ ret = div_u64(ret, 10);
+
+ dev_priv->last_count1 = total_count;
+ dev_priv->last_time1 = now;
+
+ return ret;
+}
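+
+/*
+ * Put differently: with m and c taken from the matching cparams[] row
+ * (both stay 0 if no row matches the detected c_m/r_t pair), the value
+ * returned above works out to roughly
+ *
+ *	(m * (delta_counts / delta_ms) + c) / 10
+ *
+ * where delta_counts is the growth of DMIEC + DDREC + CSIEC since the
+ * previous call.
+ */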
+
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long m, x, b;
+ u32 tsfs;
+
+ tsfs = I915_READ(TSFS);
+
+ m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+ x = I915_READ8(TR1);
+
+ b = tsfs & TSFS_INTR_MASK;
+
+ return ((m * x) / 127) - b;
+}
+
+static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+{
+ unsigned long val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(v_table); i++) {
+ if (v_table[i].pvid == pxvid) {
+ if (IS_MOBILE(dev_priv->dev))
+ val = v_table[i].vm;
+ else
+ val = v_table[i].vd;
+ }
+ }
+
+ return val;
+}
+
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+ struct timespec now, diff1;
+ u64 diff;
+ unsigned long diffms;
+ u32 count;
+
+ getrawmonotonic(&now);
+ diff1 = timespec_sub(now, dev_priv->last_time2);
+
+ /* Don't divide by 0 */
+ diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
+ if (!diffms)
+ return;
+
+ count = I915_READ(GFXEC);
+
+ if (count < dev_priv->last_count2) {
+ diff = ~0UL - dev_priv->last_count2;
+ diff += count;
+ } else {
+ diff = count - dev_priv->last_count2;
+ }
+
+ dev_priv->last_count2 = count;
+ dev_priv->last_time2 = now;
+
+ /* More magic constants... */
+ diff = diff * 1181;
+ diff = div_u64(diff, diffms * 10);
+ dev_priv->gfx_power = diff;
+}
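+
+/*
+ * In other words, gfx_power above tracks the GFXEC growth rate:
+ * roughly delta_GFXEC * 1181 / (delta_ms * 10).
+ */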
+
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long t, corr, state1, corr2, state2;
+ u32 pxvid, ext_v;
+
+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+ pxvid = (pxvid >> 24) & 0x7f;
+ ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+ state1 = ext_v;
+
+ t = i915_mch_val(dev_priv);
+
+ /* Revel in the empirically derived constants */
+
+ /* Correction factor in 1/100000 units */
+ if (t > 80)
+ corr = ((t * 2349) + 135940);
+ else if (t >= 50)
+ corr = ((t * 964) + 29317);
+ else /* < 50 */
+ corr = ((t * 301) + 1004);
+
+ corr = corr * ((150142 * state1) / 10000 - 78642);
+ corr /= 100000;
+ corr2 = (corr * dev_priv->corr);
+
+ state2 = (corr2 * state1) / 10000;
+ state2 /= 100; /* convert to mW */
+
+ i915_update_gfx_val(dev_priv);
+
+ return dev_priv->gfx_power + state2;
+}
+
+/* Global for IPS driver to get at the current i915 device */
+static struct drm_i915_private *i915_mch_dev;
+/*
+ * Lock protecting IPS related data structures
+ * - i915_mch_dev
+ * - dev_priv->max_delay
+ * - dev_priv->min_delay
+ * - dev_priv->fmax
+ * - dev_priv->gpu_busy
+ */
+DEFINE_SPINLOCK(mchdev_lock);
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+ struct drm_i915_private *dev_priv;
+ unsigned long chipset_val, graphics_val, ret = 0;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ chipset_val = i915_chipset_val(dev_priv);
+ graphics_val = i915_gfx_val(dev_priv);
+
+ ret = chipset_val + graphics_val;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->max_delay > dev_priv->fmax)
+ dev_priv->max_delay--;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->max_delay < dev_priv->min_delay)
+ dev_priv->max_delay++;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU busyness to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = false;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ ret = dev_priv->busy;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ dev_priv->max_delay = dev_priv->fstart;
+
+ if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+ ret = false;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
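
Taken together, i915_read_mch_val(), i915_gpu_raise(), i915_gpu_lower(), i915_gpu_busy() and i915_gpu_turbo_disable() are the whole surface this patch exports to the IPS driver. The sketch below is illustrative only and not part of the patch: it shows how an IPS-style consumer might poll this interface, assuming the exports are resolved at link time; the helper names and the power-budget threshold are invented for the example.

#include <linux/types.h>
#include <linux/kernel.h>

/* Exported by i915 in the hunks above. */
extern unsigned long i915_read_mch_val(void);
extern bool i915_gpu_raise(void);
extern bool i915_gpu_lower(void);
extern bool i915_gpu_busy(void);
extern bool i915_gpu_turbo_disable(void);

/* Invented example value: combined chipset + graphics budget, in mW. */
#define EXAMPLE_POWER_BUDGET	5000

static void example_ips_poll(void)
{
	unsigned long mch_val = i915_read_mch_val();

	/* 0 normally means i915 has not registered i915_mch_dev yet. */
	if (!mch_val)
		return;

	if (i915_gpu_busy() && mch_val < EXAMPLE_POWER_BUDGET)
		i915_gpu_raise();	/* headroom left: allow a higher GPU limit */
	else
		i915_gpu_lower();	/* throttle the GPU frequency limit back */
}

static void example_ips_too_hot(void)
{
	/* Drop back to the default frequency when a thermal limit is hit. */
	i915_gpu_turbo_disable();
}
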
/**
* i915_driver_load - setup chip and create an initial config
@@ -1594,7 +2040,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
resource_size_t base, size;
int ret = 0, mmio_bar;
uint32_t agp_size, prealloc_size, prealloc_start;
-
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@@ -1672,6 +2117,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->has_gem = 0;
}
+ if (dev_priv->has_gem == 0 &&
+ drm_core_check_feature(dev, DRIVER_MODESET)) {
+ DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
+ ret = -ENODEV;
+ goto out_iomapfree;
+ }
+
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
@@ -1691,7 +2143,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_workqueue_free;
}
- i915_get_mem_freq(dev);
+ if (IS_PINEVIEW(dev))
+ i915_pineview_get_mem_freq(dev);
+ else if (IS_IRONLAKE(dev))
+ i915_ironlake_get_mem_freq(dev);
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
@@ -1709,7 +2164,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->user_irq_lock);
spin_lock_init(&dev_priv->error_lock);
- dev_priv->user_irq_refcount = 0;
dev_priv->trace_irq_seqno = 0;
ret = drm_vblank_init(dev, I915_NUM_PIPE);
@@ -1738,6 +2192,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev);
+
+ spin_lock(&mchdev_lock);
+ i915_mch_dev = dev_priv;
+ dev_priv->mchdev_lock = &mchdev_lock;
+ spin_unlock(&mchdev_lock);
+
return 0;
out_workqueue_free:
@@ -1759,6 +2219,10 @@ int i915_driver_unload(struct drm_device *dev)
i915_destroy_error_state(dev);
+ spin_lock(&mchdev_lock);
+ i915_mch_dev = NULL;
+ spin_unlock(&mchdev_lock);
+
destroy_workqueue(dev_priv->wq);
del_timer_sync(&dev_priv->hangcheck_timer);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5c51e45ab68d..423dc90c1e20 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -60,95 +60,95 @@ extern int intel_agp_enabled;
.subdevice = PCI_ANY_ID, \
.driver_data = (unsigned long) info }
-const static struct intel_device_info intel_i830_info = {
+static const struct intel_device_info intel_i830_info = {
.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_845g_info = {
+static const struct intel_device_info intel_845g_info = {
.is_i8xx = 1,
};
-const static struct intel_device_info intel_i85x_info = {
+static const struct intel_device_info intel_i85x_info = {
.is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_i865g_info = {
+static const struct intel_device_info intel_i865g_info = {
.is_i8xx = 1,
};
-const static struct intel_device_info intel_i915g_info = {
+static const struct intel_device_info intel_i915g_info = {
.is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_i915gm_info = {
+static const struct intel_device_info intel_i915gm_info = {
.is_i9xx = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_i945g_info = {
+static const struct intel_device_info intel_i945g_info = {
.is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_i945gm_info = {
+static const struct intel_device_info intel_i945gm_info = {
.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_i965g_info = {
+static const struct intel_device_info intel_i965g_info = {
.is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
};
-const static struct intel_device_info intel_i965gm_info = {
+static const struct intel_device_info intel_i965gm_info = {
.is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_g33_info = {
+static const struct intel_device_info intel_g33_info = {
.is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_g45_info = {
+static const struct intel_device_info intel_g45_info = {
.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
.has_pipe_cxsr = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_gm45_info = {
+static const struct intel_device_info intel_gm45_info = {
.is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
.has_pipe_cxsr = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_pineview_info = {
+static const struct intel_device_info intel_pineview_info = {
.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
.need_gfx_hws = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_ironlake_d_info = {
+static const struct intel_device_info intel_ironlake_d_info = {
.is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
.has_pipe_cxsr = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_ironlake_m_info = {
+static const struct intel_device_info intel_ironlake_m_info = {
.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
.need_gfx_hws = 1, .has_rc6 = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_sandybridge_d_info = {
+static const struct intel_device_info intel_sandybridge_d_info = {
.is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
.has_hotplug = 1, .is_gen6 = 1,
};
-const static struct intel_device_info intel_sandybridge_m_info = {
+static const struct intel_device_info intel_sandybridge_m_info = {
.is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
.has_hotplug = 1, .is_gen6 = 1,
};
-const static struct pci_device_id pciidlist[] = {
+static const struct pci_device_id pciidlist[] = {
INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
@@ -340,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
/*
* Clear request list
*/
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev, &dev_priv->render_ring);
if (need_display)
i915_save_display(dev);
@@ -370,6 +370,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
}
} else {
DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+ mutex_unlock(&dev->struct_mutex);
return -ENODEV;
}
@@ -388,33 +389,10 @@ int i965_reset(struct drm_device *dev, u8 flags)
* switched away).
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
- !dev_priv->mm.suspended) {
- drm_i915_ring_buffer_t *ring = &dev_priv->ring;
- struct drm_gem_object *obj = ring->ring_obj;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ !dev_priv->mm.suspended) {
+ struct intel_ring_buffer *ring = &dev_priv->render_ring;
dev_priv->mm.suspended = 0;
-
- /* Stop the ring if it's running. */
- I915_WRITE(PRB0_CTL, 0);
- I915_WRITE(PRB0_TAIL, 0);
- I915_WRITE(PRB0_HEAD, 0);
-
- /* Initialize the ring. */
- I915_WRITE(PRB0_START, obj_priv->gtt_offset);
- I915_WRITE(PRB0_CTL,
- ((obj->size - 4096) & RING_NR_PAGES) |
- RING_NO_REPORT |
- RING_VALID);
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_kernel_lost_context(dev);
- else {
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
- }
-
+ ring->init(dev, ring);
mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
drm_irq_install(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7f797ef1ab39..276583159847 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -32,6 +32,7 @@
#include "i915_reg.h"
#include "intel_bios.h"
+#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
/* General customization:
@@ -55,6 +56,8 @@ enum plane {
#define I915_NUM_PIPE 2
+#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+
/* Interface history:
*
* 1.1: Original.
@@ -89,16 +92,6 @@ struct drm_i915_gem_phys_object {
struct drm_gem_object *cur_obj;
};
-typedef struct _drm_i915_ring_buffer {
- unsigned long Size;
- u8 *virtual_start;
- int head;
- int tail;
- int space;
- drm_local_map_t map;
- struct drm_gem_object *ring_obj;
-} drm_i915_ring_buffer_t;
-
struct mem_block {
struct mem_block *next;
struct mem_block *prev;
@@ -241,17 +234,15 @@ typedef struct drm_i915_private {
void __iomem *regs;
struct pci_dev *bridge_dev;
- drm_i915_ring_buffer_t ring;
+ struct intel_ring_buffer render_ring;
+ struct intel_ring_buffer bsd_ring;
drm_dma_handle_t *status_page_dmah;
- void *hw_status_page;
void *seqno_page;
dma_addr_t dma_status_page;
uint32_t counter;
- unsigned int status_gfx_addr;
unsigned int seqno_gfx_addr;
drm_local_map_t hws_map;
- struct drm_gem_object *hws_obj;
struct drm_gem_object *seqno_obj;
struct drm_gem_object *pwrctx;
@@ -267,8 +258,6 @@ typedef struct drm_i915_private {
atomic_t irq_received;
/** Protects user_irq_refcount and irq_mask_reg */
spinlock_t user_irq_lock;
- /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
- int user_irq_refcount;
u32 trace_irq_seqno;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask_reg;
@@ -289,6 +278,7 @@ typedef struct drm_i915_private {
struct mem_block *agp_heap;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
+ int num_pipe;
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
@@ -334,7 +324,7 @@ typedef struct drm_i915_private {
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
- unsigned int fsb_freq, mem_freq;
+ unsigned int fsb_freq, mem_freq, is_ddr3;
spinlock_t error_lock;
struct drm_i915_error_state *first_error;
@@ -514,18 +504,7 @@ typedef struct drm_i915_private {
*/
struct list_head shrink_list;
- /**
- * List of objects currently involved in rendering from the
- * ringbuffer.
- *
- * Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
- * represents when the rendering involved will be completed.
- *
- * A reference is held on the buffer while on this list.
- */
spinlock_t active_list_lock;
- struct list_head active_list;
/**
* List of objects which are not in the ringbuffer but which
@@ -563,12 +542,6 @@ typedef struct drm_i915_private {
struct list_head fence_list;
/**
- * List of breadcrumbs associated with GPU requests currently
- * outstanding.
- */
- struct list_head request_list;
-
- /**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
@@ -644,6 +617,18 @@ typedef struct drm_i915_private {
u8 cur_delay;
u8 min_delay;
u8 max_delay;
+ u8 fmax;
+ u8 fstart;
+
+ u64 last_count1;
+ unsigned long last_time1;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ int c_m;
+ int r_t;
+ u8 corr;
+ spinlock_t *mchdev_lock;
enum no_fbc_reason no_fbc_reason;
@@ -671,19 +656,64 @@ struct drm_i915_gem_object {
* (has pending rendering), and is not set if it's on inactive (ready
* to be unbound).
*/
- int active;
+ unsigned int active : 1;
/**
* This is set if the object has been written to since last bound
* to the GTT
*/
- int dirty;
+ unsigned int dirty : 1;
+
+ /**
+ * Fence register bits (if any) for this object. Will be set
+ * as needed when mapped into the GTT.
+ * Protected by dev->struct_mutex.
+ *
+ * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
+ */
+ int fence_reg : 5;
+
+ /**
+ * Used for checking the object doesn't appear more than once
+ * in an execbuffer object list.
+ */
+ unsigned int in_execbuffer : 1;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ unsigned int madv : 2;
+
+ /**
+ * Refcount for the pages array. With the current locking scheme, there
+ * are at most two concurrent users: Binding a bo to the gtt and
+ * pwrite/pread using physical addresses. So two bits for a maximum
+ * of two users are enough.
+ */
+ unsigned int pages_refcount : 2;
+#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3
+
+ /**
+ * Current tiling mode for the object.
+ */
+ unsigned int tiling_mode : 2;
+
+ /** How many users have pinned this object in GTT space. The following
+ * users can each hold at most one reference: pwrite/pread, pin_ioctl
+ * (via user_pin_count), execbuffer (objects are not allowed multiple
+ * times for the same batchbuffer), and the framebuffer code. When
+ * switching/pageflipping, the framebuffer code has at most two buffers
+ * pinned per crtc.
+ *
+ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+ * bits with absolutely no headroom. So use 4 bits. */
+ int pin_count : 4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
/** AGP memory structure for our GTT binding. */
DRM_AGP_MEM *agp_mem;
struct page **pages;
- int pages_refcount;
/**
* Current offset of the object in GTT space.
@@ -692,26 +722,18 @@ struct drm_i915_gem_object {
*/
uint32_t gtt_offset;
+ /* Which ring this object is currently associated with */
+ struct intel_ring_buffer *ring;
+
/**
* Fake offset for use by mmap(2)
*/
uint64_t mmap_offset;
- /**
- * Fence register bits (if any) for this object. Will be set
- * as needed when mapped into the GTT.
- * Protected by dev->struct_mutex.
- */
- int fence_reg;
-
- /** How many users have pinned this object in GTT space */
- int pin_count;
-
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_rendering_seqno;
- /** Current tiling mode for the object. */
- uint32_t tiling_mode;
+ /** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
/** Record of address bit 17 of each page at last unbind. */
@@ -734,17 +756,6 @@ struct drm_i915_gem_object {
struct drm_i915_gem_phys_object *phys_obj;
/**
- * Used for checking the object doesn't appear more than once
- * in an execbuffer object list.
- */
- int in_execbuffer;
-
- /**
- * Advice: are the backing pages purgeable?
- */
- int madv;
-
- /**
* Number of crtcs where this object is currently the fb, but
* will be page flipped away on the next vblank. When it
* reaches 0, dev_priv->pending_flip_queue will be woken up.
@@ -765,6 +776,9 @@ struct drm_i915_gem_object {
* an emission time with seqnos for tracking how far ahead of the GPU we are.
*/
struct drm_i915_gem_request {
+ /** Which ring this request was generated on */
+ struct intel_ring_buffer *ring;
+
/** GEM sequence number associated with this request. */
uint32_t seqno;
@@ -821,6 +835,11 @@ extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *boxes,
int i, int DR1, int DR4);
extern int i965_reset(struct drm_device *dev, u8 flags);
+extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
+extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
@@ -829,9 +848,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void i915_user_irq_get(struct drm_device *dev);
void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
-void i915_user_irq_put(struct drm_device *dev);
extern void i915_enable_interrupt (struct drm_device *dev);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
@@ -849,6 +866,11 @@ extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
+ u32 mask);
+extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
+ u32 mask);
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -922,11 +944,13 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
int i915_gem_object_unbind(struct drm_gem_object *obj);
void i915_gem_release_mmap(struct drm_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
void i915_gem_retire_work_handler(struct work_struct *work);
void i915_gem_clflush_object(struct drm_gem_object *obj);
int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -937,9 +961,13 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end);
int i915_gem_idle(struct drm_device *dev);
-uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
- uint32_t flush_domains);
-int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
+uint32_t i915_add_request(struct drm_device *dev,
+ struct drm_file *file_priv,
+ uint32_t flush_domains,
+ struct intel_ring_buffer *ring);
+int i915_do_wait_request(struct drm_device *dev,
+ uint32_t seqno, int interruptible,
+ struct intel_ring_buffer *ring);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
@@ -1015,7 +1043,7 @@ extern void g4x_disable_fbc(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern bool intel_fbc_enabled(struct drm_device *dev);
-
+extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
@@ -1026,7 +1054,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
* has access to the ring.
*/
#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
- if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
+ if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
+ == NULL) \
LOCK_TEST_WITH_RETURN(dev, file_priv); \
} while (0)
@@ -1039,35 +1068,31 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
#define I915_READ64(reg) readq(dev_priv->regs + (reg))
#define POSTING_READ(reg) (void)I915_READ(reg)
+#define POSTING_READ16(reg) (void)I915_READ16(reg)
#define I915_VERBOSE 0
-#define RING_LOCALS volatile unsigned int *ring_virt__;
-
-#define BEGIN_LP_RING(n) do { \
- int bytes__ = 4*(n); \
- if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \
- /* a wrap must occur between instructions so pad beforehand */ \
- if (unlikely (dev_priv->ring.tail + bytes__ > dev_priv->ring.Size)) \
- i915_wrap_ring(dev); \
- if (unlikely (dev_priv->ring.space < bytes__)) \
- i915_wait_ring(dev, bytes__, __func__); \
- ring_virt__ = (unsigned int *) \
- (dev_priv->ring.virtual_start + dev_priv->ring.tail); \
- dev_priv->ring.tail += bytes__; \
- dev_priv->ring.tail &= dev_priv->ring.Size - 1; \
- dev_priv->ring.space -= bytes__; \
+#define BEGIN_LP_RING(n) do { \
+ drm_i915_private_t *dev_priv = dev->dev_private; \
+ if (I915_VERBOSE) \
+ DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
+ intel_ring_begin(dev, &dev_priv->render_ring, 4*(n)); \
} while (0)
-#define OUT_RING(n) do { \
- if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
- *ring_virt__++ = (n); \
+
+#define OUT_RING(x) do { \
+ drm_i915_private_t *dev_priv = dev->dev_private; \
+ if (I915_VERBOSE) \
+ DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
+ intel_ring_emit(dev, &dev_priv->render_ring, x); \
} while (0)
#define ADVANCE_LP_RING() do { \
+ drm_i915_private_t *dev_priv = dev->dev_private; \
if (I915_VERBOSE) \
- DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->ring.tail); \
- I915_WRITE(PRB0_TAIL, dev_priv->ring.tail); \
+ DRM_DEBUG("ADVANCE_LP_RING %x\n", \
+ dev_priv->render_ring.tail); \
+ intel_ring_advance(dev, &dev_priv->render_ring); \
} while(0)
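
The rewritten macros keep the old calling convention, so existing call sites only need dev in scope while the work is forwarded to the render ring. A minimal illustrative emission, assuming the usual i915 driver context (i915_drv.h / i915_reg.h), still looks like this:

static void example_emit_flush(struct drm_device *dev)
{
	BEGIN_LP_RING(2);	/* reserve 2 dwords via intel_ring_begin() */
	OUT_RING(MI_FLUSH);	/* command dword */
	OUT_RING(MI_NOOP);	/* noop padding, as the old call sites did */
	ADVANCE_LP_RING();	/* advance the tail via intel_ring_advance() */
}
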
/**
@@ -1085,14 +1110,12 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
*
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
-#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
+#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
+ (dev_priv->render_ring.status_page.page_addr))[reg])
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
#define I915_BREADCRUMB_INDEX 0x21
-extern int i915_wrap_ring(struct drm_device * dev);
-extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-
#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
@@ -1138,6 +1161,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
(dev)->pci_device == 0x2A42 || \
(dev)->pci_device == 0x2E42)
+#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 112699f71fa4..9ded3dae6c87 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,8 +35,6 @@
#include <linux/swap.h>
#include <linux/pci.h>
-#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
-
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -169,7 +167,7 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
obj_priv->tiling_mode != I915_TILING_NONE;
}
-static inline int
+static inline void
slow_shmem_copy(struct page *dst_page,
int dst_offset,
struct page *src_page,
@@ -178,25 +176,16 @@ slow_shmem_copy(struct page *dst_page,
{
char *dst_vaddr, *src_vaddr;
- dst_vaddr = kmap_atomic(dst_page, KM_USER0);
- if (dst_vaddr == NULL)
- return -ENOMEM;
-
- src_vaddr = kmap_atomic(src_page, KM_USER1);
- if (src_vaddr == NULL) {
- kunmap_atomic(dst_vaddr, KM_USER0);
- return -ENOMEM;
- }
+ dst_vaddr = kmap(dst_page);
+ src_vaddr = kmap(src_page);
memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
- kunmap_atomic(src_vaddr, KM_USER1);
- kunmap_atomic(dst_vaddr, KM_USER0);
-
- return 0;
+ kunmap(src_page);
+ kunmap(dst_page);
}
-static inline int
+static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
int gpu_offset,
struct page *cpu_page,
@@ -216,15 +205,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
cpu_page, cpu_offset, length);
}
- gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
- if (gpu_vaddr == NULL)
- return -ENOMEM;
-
- cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
- if (cpu_vaddr == NULL) {
- kunmap_atomic(gpu_vaddr, KM_USER0);
- return -ENOMEM;
- }
+ gpu_vaddr = kmap(gpu_page);
+ cpu_vaddr = kmap(cpu_page);
/* Copy the data, XORing A6 with A17 (1). The user already knows he's
* XORing with the other bits (A9 for Y, A9 and A10 for X)
@@ -248,10 +230,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
length -= this_length;
}
- kunmap_atomic(cpu_vaddr, KM_USER1);
- kunmap_atomic(gpu_vaddr, KM_USER0);
-
- return 0;
+ kunmap(cpu_page);
+ kunmap(gpu_page);
}
/**
@@ -427,21 +407,19 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
page_length = PAGE_SIZE - data_page_offset;
if (do_bit17_swizzling) {
- ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
- shmem_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length,
- 1);
- } else {
- ret = slow_shmem_copy(user_pages[data_page_index],
- data_page_offset,
- obj_priv->pages[shmem_page_index],
+ slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
shmem_page_offset,
- page_length);
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length,
+ 1);
+ } else {
+ slow_shmem_copy(user_pages[data_page_index],
+ data_page_offset,
+ obj_priv->pages[shmem_page_index],
+ shmem_page_offset,
+ page_length);
}
- if (ret)
- goto fail_put_pages;
remain -= page_length;
data_ptr += page_length;
@@ -531,25 +509,24 @@ fast_user_write(struct io_mapping *mapping,
* page faults
*/
-static inline int
+static inline void
slow_kernel_write(struct io_mapping *mapping,
loff_t gtt_base, int gtt_offset,
struct page *user_page, int user_offset,
int length)
{
- char *src_vaddr, *dst_vaddr;
- unsigned long unwritten;
+ char __iomem *dst_vaddr;
+ char *src_vaddr;
- dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
- src_vaddr = kmap_atomic(user_page, KM_USER1);
- unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
- src_vaddr + user_offset,
- length);
- kunmap_atomic(src_vaddr, KM_USER1);
- io_mapping_unmap_atomic(dst_vaddr);
- if (unwritten)
- return -EFAULT;
- return 0;
+ dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
+ src_vaddr = kmap(user_page);
+
+ memcpy_toio(dst_vaddr + gtt_offset,
+ src_vaddr + user_offset,
+ length);
+
+ kunmap(user_page);
+ io_mapping_unmap(dst_vaddr);
}
static inline int
@@ -722,18 +699,11 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
- gtt_page_base, gtt_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length);
-
- /* If we get a fault while copying data, then (presumably) our
- * source page isn't available. Return the error and we'll
- * retry in the slow path.
- */
- if (ret)
- goto out_unpin_object;
+ slow_kernel_write(dev_priv->mm.gtt_mapping,
+ gtt_page_base, gtt_page_offset,
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length);
remain -= page_length;
offset += page_length;
@@ -902,21 +872,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
page_length = PAGE_SIZE - data_page_offset;
if (do_bit17_swizzling) {
- ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
- shmem_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length,
- 0);
- } else {
- ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+ slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
shmem_page_offset,
user_pages[data_page_index],
data_page_offset,
- page_length);
+ page_length,
+ 0);
+ } else {
+ slow_shmem_copy(obj_priv->pages[shmem_page_index],
+ shmem_page_offset,
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length);
}
- if (ret)
- goto fail_put_pages;
remain -= page_length;
data_ptr += page_length;
@@ -973,7 +941,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (obj_priv->phys_obj)
ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
else if (obj_priv->tiling_mode == I915_TILING_NONE &&
- dev->gtt_total != 0) {
+ dev->gtt_total != 0 &&
+ obj->write_domain != I915_GEM_DOMAIN_CPU) {
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
if (ret == -EFAULT) {
ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
@@ -1484,11 +1453,14 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
}
static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+ struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ BUG_ON(ring == NULL);
+ obj_priv->ring = ring;
/* Add a reference if we're newly entering the active list. */
if (!obj_priv->active) {
@@ -1497,8 +1469,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
}
/* Move from whatever list we were on to the tail of execution. */
spin_lock(&dev_priv->mm.active_list_lock);
- list_move_tail(&obj_priv->list,
- &dev_priv->mm.active_list);
+ list_move_tail(&obj_priv->list, &ring->active_list);
spin_unlock(&dev_priv->mm.active_list_lock);
obj_priv->last_rendering_seqno = seqno;
}
@@ -1551,6 +1522,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
BUG_ON(!list_empty(&obj_priv->gpu_write_list));
obj_priv->last_rendering_seqno = 0;
+ obj_priv->ring = NULL;
if (obj_priv->active) {
obj_priv->active = 0;
drm_gem_object_unreference(obj);
@@ -1560,7 +1532,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
static void
i915_gem_process_flushing_list(struct drm_device *dev,
- uint32_t flush_domains, uint32_t seqno)
+ uint32_t flush_domains, uint32_t seqno,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv, *next;
@@ -1571,12 +1544,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
struct drm_gem_object *obj = &obj_priv->base;
if ((obj->write_domain & flush_domains) ==
- obj->write_domain) {
+ obj->write_domain &&
+ obj_priv->ring->ring_flag == ring->ring_flag) {
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = 0;
list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_active(obj, seqno);
+ i915_gem_object_move_to_active(obj, seqno, ring);
/* update the fence lru list */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -1593,31 +1567,15 @@ i915_gem_process_flushing_list(struct drm_device *dev,
}
}
-#define PIPE_CONTROL_FLUSH(addr) \
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
- PIPE_CONTROL_DEPTH_STALL); \
- OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
- OUT_RING(0); \
- OUT_RING(0); \
-
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
uint32_t
i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
- uint32_t flush_domains)
+ uint32_t flush_domains, struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_file_private *i915_file_priv = NULL;
struct drm_i915_gem_request *request;
uint32_t seqno;
int was_empty;
- RING_LOCALS;
if (file_priv != NULL)
i915_file_priv = file_priv->driver_priv;
@@ -1626,62 +1584,14 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
if (request == NULL)
return 0;
- /* Grab the seqno we're going to make this request be, and bump the
- * next (skipping 0 so it can be the reserved no-seqno value).
- */
- seqno = dev_priv->mm.next_gem_seqno;
- dev_priv->mm.next_gem_seqno++;
- if (dev_priv->mm.next_gem_seqno == 0)
- dev_priv->mm.next_gem_seqno++;
-
- if (HAS_PIPE_CONTROL(dev)) {
- u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
-
- /*
- * Workaround qword write incoherence by flushing the
- * PIPE_NOTIFY buffers out to memory before requesting
- * an interrupt.
- */
- BEGIN_LP_RING(32);
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
- OUT_RING(seqno);
- OUT_RING(0);
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128; /* write to separate cachelines */
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
- PIPE_CONTROL_NOTIFY);
- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
- OUT_RING(seqno);
- OUT_RING(0);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(seqno);
-
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
- }
-
- DRM_DEBUG_DRIVER("%d\n", seqno);
+ seqno = ring->add_request(dev, ring, file_priv, flush_domains);
request->seqno = seqno;
+ request->ring = ring;
request->emitted_jiffies = jiffies;
- was_empty = list_empty(&dev_priv->mm.request_list);
- list_add_tail(&request->list, &dev_priv->mm.request_list);
+ was_empty = list_empty(&ring->request_list);
+ list_add_tail(&request->list, &ring->request_list);
+
if (i915_file_priv) {
list_add_tail(&request->client_list,
&i915_file_priv->mm.request_list);
@@ -1693,7 +1603,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
* domain we're flushing with our flush.
*/
if (flush_domains != 0)
- i915_gem_process_flushing_list(dev, flush_domains, seqno);
+ i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
if (!dev_priv->mm.suspended) {
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1710,20 +1620,16 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
* before signalling the CPU
*/
static uint32_t
-i915_retire_commands(struct drm_device *dev)
+i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
uint32_t flush_domains = 0;
- RING_LOCALS;
/* The sampler always gets flushed on i965 (sigh) */
if (IS_I965G(dev))
flush_domains |= I915_GEM_DOMAIN_SAMPLER;
- BEGIN_LP_RING(2);
- OUT_RING(cmd);
- OUT_RING(0); /* noop */
- ADVANCE_LP_RING();
+
+ ring->flush(dev, ring,
+ I915_GEM_DOMAIN_COMMAND, flush_domains);
return flush_domains;
}
@@ -1743,11 +1649,11 @@ i915_gem_retire_request(struct drm_device *dev,
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
spin_lock(&dev_priv->mm.active_list_lock);
- while (!list_empty(&dev_priv->mm.active_list)) {
+ while (!list_empty(&request->ring->active_list)) {
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- obj_priv = list_first_entry(&dev_priv->mm.active_list,
+ obj_priv = list_first_entry(&request->ring->active_list,
struct drm_i915_gem_object,
list);
obj = &obj_priv->base;
@@ -1794,35 +1700,33 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
}
uint32_t
-i915_get_gem_seqno(struct drm_device *dev)
+i915_get_gem_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (HAS_PIPE_CONTROL(dev))
- return ((volatile u32 *)(dev_priv->seqno_page))[0];
- else
- return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+ return ring->get_gem_seqno(dev, ring);
}
/**
* This function clears the request list as sequence numbers are passed.
*/
void
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
- if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
+ if (!ring->status_page.page_addr
+ || list_empty(&ring->request_list))
return;
- seqno = i915_get_gem_seqno(dev);
+ seqno = i915_get_gem_seqno(dev, ring);
- while (!list_empty(&dev_priv->mm.request_list)) {
+ while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
uint32_t retiring_seqno;
- request = list_first_entry(&dev_priv->mm.request_list,
+ request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
list);
retiring_seqno = request->seqno;
@@ -1840,7 +1744,8 @@ i915_gem_retire_requests(struct drm_device *dev)
if (unlikely (dev_priv->trace_irq_seqno &&
i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
- i915_user_irq_put(dev);
+
+ ring->user_irq_put(dev, ring);
dev_priv->trace_irq_seqno = 0;
}
}
@@ -1856,15 +1761,22 @@ i915_gem_retire_work_handler(struct work_struct *work)
dev = dev_priv->dev;
mutex_lock(&dev->struct_mutex);
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
+ if (HAS_BSD(dev))
+ i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+
if (!dev_priv->mm.suspended &&
- !list_empty(&dev_priv->mm.request_list))
+ (!list_empty(&dev_priv->render_ring.request_list) ||
+ (HAS_BSD(dev) &&
+ !list_empty(&dev_priv->bsd_ring.request_list))))
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
mutex_unlock(&dev->struct_mutex);
}
int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+ int interruptible, struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 ier;
@@ -1875,7 +1787,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
if (atomic_read(&dev_priv->mm.wedged))
return -EIO;
- if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+ if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
if (HAS_PCH_SPLIT(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
@@ -1889,19 +1801,21 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
trace_i915_gem_request_wait_begin(dev, seqno);
- dev_priv->mm.waiting_gem_seqno = seqno;
- i915_user_irq_get(dev);
+ ring->waiting_gem_seqno = seqno;
+ ring->user_irq_get(dev, ring);
if (interruptible)
- ret = wait_event_interruptible(dev_priv->irq_queue,
- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
- atomic_read(&dev_priv->mm.wedged));
+ ret = wait_event_interruptible(ring->irq_queue,
+ i915_seqno_passed(
+ ring->get_gem_seqno(dev, ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
else
- wait_event(dev_priv->irq_queue,
- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
- atomic_read(&dev_priv->mm.wedged));
+ wait_event(ring->irq_queue,
+ i915_seqno_passed(
+ ring->get_gem_seqno(dev, ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
- i915_user_irq_put(dev);
- dev_priv->mm.waiting_gem_seqno = 0;
+ ring->user_irq_put(dev, ring);
+ ring->waiting_gem_seqno = 0;
trace_i915_gem_request_wait_end(dev, seqno);
}
@@ -1910,7 +1824,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
if (ret && ret != -ERESTARTSYS)
DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
- __func__, ret, seqno, i915_get_gem_seqno(dev));
+ __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
/* Directly dispatch request retiring. While we have the work queue
* to handle this, the waiter on a request often wants an associated
@@ -1918,7 +1832,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
* a separate wait queue to handle that.
*/
if (ret == 0)
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev, ring);
return ret;
}
@@ -1928,9 +1842,10 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
* request and object lists appropriately for that event.
*/
static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+i915_wait_request(struct drm_device *dev, uint32_t seqno,
+ struct intel_ring_buffer *ring)
{
- return i915_do_wait_request(dev, seqno, 1);
+ return i915_do_wait_request(dev, seqno, 1, ring);
}
static void
@@ -1939,71 +1854,29 @@ i915_gem_flush(struct drm_device *dev,
uint32_t flush_domains)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t cmd;
- RING_LOCALS;
-
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
- invalidate_domains, flush_domains);
-#endif
- trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
- invalidate_domains, flush_domains);
-
if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
+ dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
+ invalidate_domains,
+ flush_domains);
+
+ if (HAS_BSD(dev))
+ dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
+ invalidate_domains,
+ flush_domains);
+}
- if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
- /*
- * read/write caches:
- *
- * I915_GEM_DOMAIN_RENDER is always invalidated, but is
- * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
- * also flushed at 2d versus 3d pipeline switches.
- *
- * read-only caches:
- *
- * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
- * MI_READ_FLUSH is set, and is always flushed on 965.
- *
- * I915_GEM_DOMAIN_COMMAND may not exist?
- *
- * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
- * invalidated when MI_EXE_FLUSH is set.
- *
- * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
- * invalidated with every MI_FLUSH.
- *
- * TLBs:
- *
- * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
- * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
- * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
- * are flushed at any MI_FLUSH.
- */
-
- cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- if ((invalidate_domains|flush_domains) &
- I915_GEM_DOMAIN_RENDER)
- cmd &= ~MI_NO_WRITE_FLUSH;
- if (!IS_I965G(dev)) {
- /*
- * On the 965, the sampler cache always gets flushed
- * and this bit is reserved.
- */
- if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
- cmd |= MI_READ_FLUSH;
- }
- if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
- cmd |= MI_EXE_FLUSH;
-
-#if WATCH_EXEC
- DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
-#endif
- BEGIN_LP_RING(2);
- OUT_RING(cmd);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
- }
+static void
+i915_gem_flush_ring(struct drm_device *dev,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains,
+ struct intel_ring_buffer *ring)
+{
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ drm_agp_chipset_flush(dev);
+ ring->flush(dev, ring,
+ invalidate_domains,
+ flush_domains);
}
/**
@@ -2030,7 +1903,8 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
DRM_INFO("%s: object %p wait for seqno %08x\n",
__func__, obj, obj_priv->last_rendering_seqno);
#endif
- ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+ ret = i915_wait_request(dev,
+ obj_priv->last_rendering_seqno, obj_priv->ring);
if (ret != 0)
return ret;
}
@@ -2146,11 +2020,14 @@ i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
bool lists_empty;
- uint32_t seqno;
+ uint32_t seqno1, seqno2;
+ int ret;
spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list);
+ lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->render_ring.active_list) &&
+ (!HAS_BSD(dev) ||
+ list_empty(&dev_priv->bsd_ring.active_list)));
spin_unlock(&dev_priv->mm.active_list_lock);
if (lists_empty)
@@ -2158,11 +2035,25 @@ i915_gpu_idle(struct drm_device *dev)
/* Flush everything onto the inactive list. */
i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
- if (seqno == 0)
+ seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+ &dev_priv->render_ring);
+ if (seqno1 == 0)
return -ENOMEM;
+ ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
+
+ if (HAS_BSD(dev)) {
+ seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+ &dev_priv->bsd_ring);
+ if (seqno2 == 0)
+ return -ENOMEM;
+
+ ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
+ if (ret)
+ return ret;
+ }
+
- return i915_wait_request(dev, seqno);
+ return ret;
}
static int
@@ -2175,7 +2066,9 @@ i915_gem_evict_everything(struct drm_device *dev)
spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
+ list_empty(&dev_priv->render_ring.active_list) &&
+ (!HAS_BSD(dev)
+ || list_empty(&dev_priv->bsd_ring.active_list)));
spin_unlock(&dev_priv->mm.active_list_lock);
if (lists_empty)
@@ -2195,7 +2088,9 @@ i915_gem_evict_everything(struct drm_device *dev)
spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
+ list_empty(&dev_priv->render_ring.active_list) &&
+ (!HAS_BSD(dev)
+ || list_empty(&dev_priv->bsd_ring.active_list)));
spin_unlock(&dev_priv->mm.active_list_lock);
BUG_ON(!lists_empty);
@@ -2209,8 +2104,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
struct drm_gem_object *obj;
int ret;
+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
for (;;) {
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev, render_ring);
+
+ if (HAS_BSD(dev))
+ i915_gem_retire_requests(dev, bsd_ring);
/* If there's an inactive buffer available now, grab it
* and be done.
@@ -2234,14 +2134,30 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
* things, wait for the next to finish and hopefully leave us
* a buffer to evict.
*/
- if (!list_empty(&dev_priv->mm.request_list)) {
+ if (!list_empty(&render_ring->request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&render_ring->request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ ret = i915_wait_request(dev,
+ request->seqno, request->ring);
+ if (ret)
+ return ret;
+
+ continue;
+ }
+
+ if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
struct drm_i915_gem_request *request;
- request = list_first_entry(&dev_priv->mm.request_list,
+ request = list_first_entry(&bsd_ring->request_list,
struct drm_i915_gem_request,
list);
- ret = i915_wait_request(dev, request->seqno);
+ ret = i915_wait_request(dev,
+ request->seqno, request->ring);
if (ret)
return ret;
@@ -2268,10 +2184,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
if (obj != NULL) {
uint32_t seqno;
- i915_gem_flush(dev,
+ i915_gem_flush_ring(dev,
+ obj->write_domain,
obj->write_domain,
- obj->write_domain);
- seqno = i915_add_request(dev, NULL, obj->write_domain);
+ obj_priv->ring);
+ seqno = i915_add_request(dev, NULL,
+ obj->write_domain,
+ obj_priv->ring);
if (seqno == 0)
return -ENOMEM;
continue;
@@ -2299,6 +2218,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
struct inode *inode;
struct page *page;
+ BUG_ON(obj_priv->pages_refcount
+ == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
+
if (obj_priv->pages_refcount++ != 0)
return 0;
@@ -2697,6 +2619,14 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
return -EINVAL;
}
+ /* If the object is bigger than the entire aperture, reject it early
+ * before evicting everything in a vain attempt to find space.
+ */
+ if (obj->size > dev->gtt_total) {
+ DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+ return -E2BIG;
+ }
+
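	/*
	 * Worked example (hypothetical sizes): with a 256 MiB aperture,
	 * dev->gtt_total == 256 << 20, so a 512 MiB object fails here with
	 * -E2BIG immediately instead of first evicting every resident
	 * buffer and then failing anyway.
	 */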
search_free:
free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
obj->size, alignment, 0);
@@ -2807,6 +2737,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
uint32_t old_write_domain;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
return;
@@ -2814,7 +2745,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
/* Queue the GPU write cache flushing we need. */
old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
- (void) i915_add_request(dev, NULL, obj->write_domain);
+ (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring);
BUG_ON(obj->write_domain);
trace_i915_gem_object_change_domain(obj,
@@ -2954,23 +2885,24 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
DRM_INFO("%s: object %p wait for seqno %08x\n",
__func__, obj, obj_priv->last_rendering_seqno);
#endif
- ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+ ret = i915_do_wait_request(dev,
+ obj_priv->last_rendering_seqno,
+ 0,
+ obj_priv->ring);
if (ret != 0)
return ret;
}
+ i915_gem_object_flush_cpu_write_domain(obj);
+
old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;
- obj->read_domains &= I915_GEM_DOMAIN_GTT;
-
- i915_gem_object_flush_cpu_write_domain(obj);
-
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = I915_GEM_DOMAIN_GTT;
obj_priv->dirty = 1;
@@ -3354,9 +3286,13 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
obj_priv->tiling_mode != I915_TILING_NONE;
/* Check fence reg constraints and rebind if necessary */
- if (need_fence && !i915_gem_object_fence_offset_ok(obj,
- obj_priv->tiling_mode))
- i915_gem_object_unbind(obj);
+ if (need_fence &&
+ !i915_gem_object_fence_offset_ok(obj,
+ obj_priv->tiling_mode)) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
+ }
/* Choose the GTT offset for our buffer and put it there. */
ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
@@ -3370,9 +3306,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
if (need_fence) {
ret = i915_gem_object_get_fence_reg(obj);
if (ret != 0) {
- if (ret != -EBUSY && ret != -ERESTARTSYS)
- DRM_ERROR("Failure to install fence: %d\n",
- ret);
i915_gem_object_unpin(obj);
return ret;
}
@@ -3545,62 +3478,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
return 0;
}
-/** Dispatch a batchbuffer to the ring
- */
-static int
-i915_dispatch_gem_execbuffer(struct drm_device *dev,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int nbox = exec->num_cliprects;
- int i = 0, count;
- uint32_t exec_start, exec_len;
- RING_LOCALS;
-
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- exec_len = (uint32_t) exec->batch_len;
-
- trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
-
- count = nbox ? nbox : 1;
-
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- int ret = i915_emit_box(dev, cliprects, i,
- exec->DR1, exec->DR4);
- if (ret)
- return ret;
- }
-
- if (IS_I830(dev) || IS_845G(dev)) {
- BEGIN_LP_RING(4);
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(exec_start | MI_BATCH_NON_SECURE);
- OUT_RING(exec_start + exec_len - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(2);
- if (IS_I965G(dev)) {
- OUT_RING(MI_BATCH_BUFFER_START |
- (2 << 6) |
- MI_BATCH_NON_SECURE_I965);
- OUT_RING(exec_start);
- } else {
- OUT_RING(MI_BATCH_BUFFER_START |
- (2 << 6));
- OUT_RING(exec_start | MI_BATCH_NON_SECURE);
- }
- ADVANCE_LP_RING();
- }
- }
-
- /* XXX breadcrumb */
- return 0;
-}
-
/* Throttle our rendering by waiting until the ring has completed our requests
* emitted over 20 msec ago.
*
@@ -3629,7 +3506,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
- ret = i915_wait_request(dev, request->seqno);
+ ret = i915_wait_request(dev, request->seqno, request->ring);
if (ret != 0)
break;
}
@@ -3786,10 +3663,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
uint32_t seqno, flush_domains, reloc_index;
int pin_tries, flips;
+ struct intel_ring_buffer *ring = NULL;
+
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
+ if (args->flags & I915_EXEC_BSD) {
+ if (!HAS_BSD(dev)) {
+ DRM_ERROR("execbuf with wrong flag\n");
+ return -EINVAL;
+ }
+ ring = &dev_priv->bsd_ring;
+ } else {
+ ring = &dev_priv->render_ring;
+ }
+
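/*
 * Userspace-side sketch (assumed usage, not part of this kernel patch):
 * a client that wants the new BSD ring sets I915_EXEC_BSD in the
 * execbuffer2 flags; leaving it clear (or passing I915_EXEC_RENDER, as the
 * legacy execbuffer ioctl now does) keeps the batch on the render ring.
 * fd, exec_objects, num_objects and batch_len are hypothetical; libdrm
 * headers (xf86drm.h, i915_drm.h) are assumed.
 */
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uint64_t)(uintptr_t)exec_objects;
	execbuf.buffer_count = num_objects;
	execbuf.batch_len = batch_len;
	execbuf.flags = I915_EXEC_BSD;	/* route this batch to bsd_ring */

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))
		return -errno;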
if (args->buffer_count < 1) {
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
@@ -3902,11 +3791,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret != -ENOSPC || pin_tries >= 1) {
if (ret != -ERESTARTSYS) {
unsigned long long total_size = 0;
- for (i = 0; i < args->buffer_count; i++)
+ int num_fences = 0;
+ for (i = 0; i < args->buffer_count; i++) {
+ obj_priv = object_list[i]->driver_private;
+
total_size += object_list[i]->size;
- DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
+ num_fences +=
+ exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj_priv->tiling_mode != I915_TILING_NONE;
+ }
+ DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
pinned+1, args->buffer_count,
- total_size, ret);
+ total_size, num_fences,
+ ret);
DRM_ERROR("%d objects [%d pinned], "
"%d object bytes [%d pinned], "
"%d/%d gtt bytes\n",
@@ -3976,9 +3873,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
- if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
+ if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
(void)i915_add_request(dev, file_priv,
- dev->flush_domains);
+ dev->flush_domains,
+ &dev_priv->render_ring);
+
+ if (HAS_BSD(dev))
+ (void)i915_add_request(dev, file_priv,
+ dev->flush_domains,
+ &dev_priv->bsd_ring);
+ }
}
for (i = 0; i < args->buffer_count; i++) {
@@ -4015,7 +3919,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
#endif
/* Exec the batchbuffer */
- ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
+ ret = ring->dispatch_gem_execbuffer(dev, ring, args,
+ cliprects, exec_offset);
if (ret) {
DRM_ERROR("dispatch failed %d\n", ret);
goto err;
@@ -4025,7 +3930,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* Ensure that the commands in the batch buffer are
* finished before the interrupt fires
*/
- flush_domains = i915_retire_commands(dev);
+ flush_domains = i915_retire_commands(dev, ring);
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4036,12 +3941,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* *some* interrupts representing completion of buffers that we can
* wait on when trying to clear up gtt space).
*/
- seqno = i915_add_request(dev, file_priv, flush_domains);
+ seqno = i915_add_request(dev, file_priv, flush_domains, ring);
BUG_ON(seqno == 0);
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
+ obj_priv = to_intel_bo(obj);
- i915_gem_object_move_to_active(obj, seqno);
+ i915_gem_object_move_to_active(obj, seqno, ring);
#if WATCH_LRU
DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
@@ -4153,7 +4059,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2.DR4 = args->DR4;
exec2.num_cliprects = args->num_cliprects;
exec2.cliprects_ptr = args->cliprects_ptr;
- exec2.flags = 0;
+ exec2.flags = I915_EXEC_RENDER;
ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
if (!ret) {
@@ -4239,7 +4145,20 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
+ BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+
i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ if (obj_priv->gtt_space != NULL) {
+ if (alignment == 0)
+ alignment = i915_gem_get_gtt_alignment(obj);
+ if (obj_priv->gtt_offset & (alignment - 1)) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
+ }
+ }
+
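	/*
	 * Worked example (hypothetical offsets): an object already bound at
	 * gtt_offset 0x21000 that is now pinned with alignment 0x10000 has
	 * 0x21000 & 0xffff == 0x1000, so it is unbound here and rebound
	 * below at a suitably aligned offset rather than pinned in place.
	 */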
if (obj_priv->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment);
if (ret)
@@ -4392,6 +4311,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_busy *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
+ drm_i915_private_t *dev_priv = dev->dev_private;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
@@ -4406,7 +4326,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* actually unmasked, and our working set ends up being larger than
* required.
*/
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
+ if (HAS_BSD(dev))
+ i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
obj_priv = to_intel_bo(obj);
/* Don't count being on the flushing list against the object being
@@ -4573,7 +4496,10 @@ i915_gem_idle(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
- if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
+ if (dev_priv->mm.suspended ||
+ (dev_priv->render_ring.gem_object == NULL) ||
+ (HAS_BSD(dev) &&
+ dev_priv->bsd_ring.gem_object == NULL)) {
mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -4654,71 +4580,6 @@ err:
return ret;
}
-static int
-i915_gem_init_hws(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
-
- /* If we need a physical address for the status page, it's already
- * initialized at driver load time.
- */
- if (!I915_NEED_GFX_HWS(dev))
- return 0;
-
- obj = i915_gem_alloc_object(dev, 4096);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate status page\n");
- ret = -ENOMEM;
- goto err;
- }
- obj_priv = to_intel_bo(obj);
- obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
-
- ret = i915_gem_object_pin(obj, 4096);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- goto err_unref;
- }
-
- dev_priv->status_gfx_addr = obj_priv->gtt_offset;
-
- dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
- if (dev_priv->hw_status_page == NULL) {
- DRM_ERROR("Failed to map status page.\n");
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
- ret = -EINVAL;
- goto err_unpin;
- }
-
- if (HAS_PIPE_CONTROL(dev)) {
- ret = i915_gem_init_pipe_control(dev);
- if (ret)
- goto err_unpin;
- }
-
- dev_priv->hws_obj = obj;
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- if (IS_GEN6(dev)) {
- I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
- I915_READ(HWS_PGA_GEN6); /* posting read */
- } else {
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
- I915_READ(HWS_PGA); /* posting read */
- }
- DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
-
- return 0;
-
-err_unpin:
- i915_gem_object_unpin(obj);
-err_unref:
- drm_gem_object_unreference(obj);
-err:
- return 0;
-}
static void
i915_gem_cleanup_pipe_control(struct drm_device *dev)
@@ -4737,146 +4598,46 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev)
dev_priv->seqno_page = NULL;
}
-static void
-i915_gem_cleanup_hws(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- if (dev_priv->hws_obj == NULL)
- return;
-
- obj = dev_priv->hws_obj;
- obj_priv = to_intel_bo(obj);
-
- kunmap(obj_priv->pages[0]);
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- dev_priv->hws_obj = NULL;
-
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
- dev_priv->hw_status_page = NULL;
-
- if (HAS_PIPE_CONTROL(dev))
- i915_gem_cleanup_pipe_control(dev);
-
- /* Write high address into HWS_PGA when disabling. */
- I915_WRITE(HWS_PGA, 0x1ffff000);
-}
-
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- drm_i915_ring_buffer_t *ring = &dev_priv->ring;
int ret;
- u32 head;
-
- ret = i915_gem_init_hws(dev);
- if (ret != 0)
- return ret;
- obj = i915_gem_alloc_object(dev, 128 * 1024);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate ringbuffer\n");
- i915_gem_cleanup_hws(dev);
- return -ENOMEM;
- }
- obj_priv = to_intel_bo(obj);
+ dev_priv->render_ring = render_ring;
- ret = i915_gem_object_pin(obj, 4096);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- i915_gem_cleanup_hws(dev);
- return ret;
+ if (!I915_NEED_GFX_HWS(dev)) {
+ dev_priv->render_ring.status_page.page_addr
+ = dev_priv->status_page_dmah->vaddr;
+ memset(dev_priv->render_ring.status_page.page_addr,
+ 0, PAGE_SIZE);
}
- /* Set up the kernel mapping for the ring. */
- ring->Size = obj->size;
-
- ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
- ring->map.size = obj->size;
- ring->map.type = 0;
- ring->map.flags = 0;
- ring->map.mtrr = 0;
-
- drm_core_ioremap_wc(&ring->map, dev);
- if (ring->map.handle == NULL) {
- DRM_ERROR("Failed to map ringbuffer.\n");
- memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- i915_gem_cleanup_hws(dev);
- return -EINVAL;
- }
- ring->ring_obj = obj;
- ring->virtual_start = ring->map.handle;
-
- /* Stop the ring if it's running. */
- I915_WRITE(PRB0_CTL, 0);
- I915_WRITE(PRB0_TAIL, 0);
- I915_WRITE(PRB0_HEAD, 0);
-
- /* Initialize the ring. */
- I915_WRITE(PRB0_START, obj_priv->gtt_offset);
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
- /* G45 ring initialization fails to reset head to zero */
- if (head != 0) {
- DRM_ERROR("Ring head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- I915_READ(PRB0_CTL),
- I915_READ(PRB0_HEAD),
- I915_READ(PRB0_TAIL),
- I915_READ(PRB0_START));
- I915_WRITE(PRB0_HEAD, 0);
-
- DRM_ERROR("Ring head forced to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- I915_READ(PRB0_CTL),
- I915_READ(PRB0_HEAD),
- I915_READ(PRB0_TAIL),
- I915_READ(PRB0_START));
- }
-
- I915_WRITE(PRB0_CTL,
- ((obj->size - 4096) & RING_NR_PAGES) |
- RING_NO_REPORT |
- RING_VALID);
-
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
- /* If the head is still not zero, the ring is dead */
- if (head != 0) {
- DRM_ERROR("Ring initialization failed "
- "ctl %08x head %08x tail %08x start %08x\n",
- I915_READ(PRB0_CTL),
- I915_READ(PRB0_HEAD),
- I915_READ(PRB0_TAIL),
- I915_READ(PRB0_START));
- return -EIO;
+ if (HAS_PIPE_CONTROL(dev)) {
+ ret = i915_gem_init_pipe_control(dev);
+ if (ret)
+ return ret;
}
- /* Update our cache of the ring state */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_kernel_lost_context(dev);
- else {
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
- }
+ ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+ if (ret)
+ goto cleanup_pipe_control;
- if (IS_I9XX(dev) && !IS_GEN3(dev)) {
- I915_WRITE(MI_MODE,
- (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+ if (HAS_BSD(dev)) {
+ dev_priv->bsd_ring = bsd_ring;
+ ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+ if (ret)
+ goto cleanup_render_ring;
}
return 0;
+
+cleanup_render_ring:
+ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+cleanup_pipe_control:
+ if (HAS_PIPE_CONTROL(dev))
+ i915_gem_cleanup_pipe_control(dev);
+ return ret;
}
void
@@ -4884,17 +4645,11 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (dev_priv->ring.ring_obj == NULL)
- return;
-
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
-
- i915_gem_object_unpin(dev_priv->ring.ring_obj);
- drm_gem_object_unreference(dev_priv->ring.ring_obj);
- dev_priv->ring.ring_obj = NULL;
- memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
-
- i915_gem_cleanup_hws(dev);
+ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+ if (HAS_BSD(dev))
+ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ if (HAS_PIPE_CONTROL(dev))
+ i915_gem_cleanup_pipe_control(dev);
}
int
@@ -4922,12 +4677,14 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}
spin_lock(&dev_priv->mm.active_list_lock);
- BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
+ BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
spin_unlock(&dev_priv->mm.active_list_lock);
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
- BUG_ON(!list_empty(&dev_priv->mm.request_list));
+ BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
+ BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
mutex_unlock(&dev->struct_mutex);
drm_irq_install(dev);
@@ -4966,18 +4723,20 @@ i915_gem_load(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
spin_lock_init(&dev_priv->mm.active_list_lock);
- INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
- INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
+ INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
+ if (HAS_BSD(dev)) {
+ INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
+ INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
+ }
for (i = 0; i < 16; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
- dev_priv->mm.next_gem_seqno = 1;
-
spin_lock(&shrink_list_lock);
list_add(&dev_priv->mm.shrink_list, &shrink_list);
spin_unlock(&shrink_list_lock);
@@ -5209,7 +4968,9 @@ i915_gpu_is_active(struct drm_device *dev)
spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list);
+ list_empty(&dev_priv->render_ring.active_list);
+ if (HAS_BSD(dev))
+ lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
spin_unlock(&dev_priv->mm.active_list_lock);
return !lists_empty;
@@ -5254,8 +5015,10 @@ rescan:
continue;
spin_unlock(&shrink_list_lock);
+ i915_gem_retire_requests(dev, &dev_priv->render_ring);
- i915_gem_retire_requests(dev);
+ if (HAS_BSD(dev))
+ i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
list_for_each_entry_safe(obj_priv, next_obj,
&dev_priv->mm.inactive_list,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8c3f0802686d..2479be001e40 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -53,7 +53,7 @@
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
/** Interrupts that we mask and unmask at runtime. */
-#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
+#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
PIPE_VBLANK_INTERRUPT_STATUS)
@@ -74,7 +74,7 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
}
}
-static inline void
+void
ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
@@ -115,7 +115,7 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
}
}
-static inline void
+void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask_reg & mask) != mask) {
@@ -278,10 +278,9 @@ static void i915_handle_rps_change(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
- u16 rgvswctl;
u8 new_delay = dev_priv->cur_delay;
- I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG);
+ I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
busy_up = I915_READ(RCPREVBSYTUPAVG);
busy_down = I915_READ(RCPREVBSYTDNAVG);
max_avg = I915_READ(RCBMAXAVG);
@@ -300,27 +299,8 @@ static void i915_handle_rps_change(struct drm_device *dev)
new_delay = dev_priv->min_delay;
}
- DRM_DEBUG("rps change requested: %d -> %d\n",
- dev_priv->cur_delay, new_delay);
-
- rgvswctl = I915_READ(MEMSWCTL);
- if (rgvswctl & MEMCTL_CMD_STS) {
- DRM_ERROR("gpu busy, RCS change rejected\n");
- return; /* still busy with another command */
- }
-
- /* Program the new state */
- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
- (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
- I915_WRITE(MEMSWCTL, rgvswctl);
- POSTING_READ(MEMSWCTL);
-
- rgvswctl |= MEMCTL_CMD_STS;
- I915_WRITE(MEMSWCTL, rgvswctl);
-
- dev_priv->cur_delay = new_delay;
-
- DRM_DEBUG("rps changed\n");
+ if (ironlake_set_drps(dev, new_delay))
+ dev_priv->cur_delay = new_delay;
return;
}
@@ -331,6 +311,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir;
struct drm_i915_master_private *master_priv;
+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
@@ -354,13 +335,16 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
}
if (gt_iir & GT_PIPE_NOTIFY) {
- u32 seqno = i915_get_gem_seqno(dev);
- dev_priv->mm.irq_gem_seqno = seqno;
+ u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
+ render_ring->irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->irq_queue);
+ DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
dev_priv->hangcheck_count = 0;
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
}
+ if (gt_iir & GT_BSD_USER_INTERRUPT)
+ DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+
if (de_iir & DE_GSE)
ironlake_opregion_gse_intr(dev);
@@ -388,7 +372,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
}
if (de_iir & DE_PCU_EVENT) {
- I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
+ I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
i915_handle_rps_change(dev);
}
@@ -536,17 +520,18 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
*/
bbaddr = 0;
head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring = (u32 *)(dev_priv->ring.virtual_start + head);
+ ring = (u32 *)(dev_priv->render_ring.virtual_start + head);
- while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+ while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
bbaddr = i915_get_bbaddr(dev, ring);
if (bbaddr)
break;
}
if (bbaddr == 0) {
- ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size);
- while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+ ring = (u32 *)(dev_priv->render_ring.virtual_start
+ + dev_priv->render_ring.size);
+ while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
bbaddr = i915_get_bbaddr(dev, ring);
if (bbaddr)
break;
@@ -587,7 +572,7 @@ static void i915_capture_error_state(struct drm_device *dev)
return;
}
- error->seqno = i915_get_gem_seqno(dev);
+ error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
error->pipeastat = I915_READ(PIPEASTAT);
@@ -615,7 +600,9 @@ static void i915_capture_error_state(struct drm_device *dev)
batchbuffer[0] = NULL;
batchbuffer[1] = NULL;
count = 0;
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+ list_for_each_entry(obj_priv,
+ &dev_priv->render_ring.active_list, list) {
+
struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL &&
@@ -639,7 +626,8 @@ static void i915_capture_error_state(struct drm_device *dev)
error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
/* Record the ringbuffer */
- error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj);
+ error->ringbuffer = i915_error_object_create(dev,
+ dev_priv->render_ring.gem_object);
/* Record buffers on the active list. */
error->active_bo = NULL;
@@ -651,7 +639,8 @@ static void i915_capture_error_state(struct drm_device *dev)
if (error->active_bo) {
int i = 0;
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+ list_for_each_entry(obj_priv,
+ &dev_priv->render_ring.active_list, list) {
struct drm_gem_object *obj = &obj_priv->base;
error->active_bo[i].size = obj->size;
@@ -703,24 +692,13 @@ void i915_destroy_error_state(struct drm_device *dev)
i915_error_state_free(dev, error);
}
-/**
- * i915_handle_error - handle an error interrupt
- * @dev: drm device
- *
- * Do some basic checking of regsiter state at error interrupt time and
- * dump it to the syslog. Also call i915_capture_error_state() to make
- * sure we get a record and make it available in debugfs. Fire a uevent
- * so userspace knows something bad happened (should trigger collection
- * of a ring dump etc.).
- */
-static void i915_handle_error(struct drm_device *dev, bool wedged)
+static void i915_report_and_clear_eir(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 eir = I915_READ(EIR);
- u32 pipea_stats = I915_READ(PIPEASTAT);
- u32 pipeb_stats = I915_READ(PIPEBSTAT);
- i915_capture_error_state(dev);
+ if (!eir)
+ return;
printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
eir);
@@ -766,6 +744,9 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
}
if (eir & I915_ERROR_MEMORY_REFRESH) {
+ u32 pipea_stats = I915_READ(PIPEASTAT);
+ u32 pipeb_stats = I915_READ(PIPEBSTAT);
+
printk(KERN_ERR "memory refresh error\n");
printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
pipea_stats);
@@ -822,6 +803,24 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
I915_WRITE(EMR, I915_READ(EMR) | eir);
I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
}
+}
+
+/**
+ * i915_handle_error - handle an error interrupt
+ * @dev: drm device
+ *
+ * Do some basic checking of register state at error interrupt time and
+ * dump it to the syslog. Also call i915_capture_error_state() to make
+ * sure we get a record and make it available in debugfs. Fire a uevent
+ * so userspace knows something bad happened (should trigger collection
+ * of a ring dump etc.).
+ */
+static void i915_handle_error(struct drm_device *dev, bool wedged)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ i915_capture_error_state(dev);
+ i915_report_and_clear_eir(dev);
if (wedged) {
atomic_set(&dev_priv->mm.wedged, 1);
@@ -829,7 +828,7 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
/*
* Wakeup waiting processes so they don't hang
*/
- DRM_WAKEUP(&dev_priv->irq_queue);
+ DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
}
queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -848,6 +847,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
unsigned long irqflags;
int irq_received;
int ret = IRQ_NONE;
+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
atomic_inc(&dev_priv->irq_received);
@@ -928,14 +928,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
}
if (iir & I915_USER_INTERRUPT) {
- u32 seqno = i915_get_gem_seqno(dev);
- dev_priv->mm.irq_gem_seqno = seqno;
+ u32 seqno =
+ render_ring->get_gem_seqno(dev, render_ring);
+ render_ring->irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->irq_queue);
+ DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
dev_priv->hangcheck_count = 0;
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
}
+ if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
+ DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+
if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
intel_prepare_page_flip(dev, 0);
@@ -984,7 +988,6 @@ static int i915_emit_irq(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- RING_LOCALS;
i915_kernel_lost_context(dev);
@@ -1006,43 +1009,13 @@ static int i915_emit_irq(struct drm_device * dev)
return dev_priv->counter;
}
-void i915_user_irq_get(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
- if (HAS_PCH_SPLIT(dev))
- ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
- else
- i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
- }
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
-void i915_user_irq_put(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
- if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
- if (HAS_PCH_SPLIT(dev))
- ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
- else
- i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
- }
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
if (dev_priv->trace_irq_seqno == 0)
- i915_user_irq_get(dev);
+ render_ring->user_irq_get(dev, render_ring);
dev_priv->trace_irq_seqno = seqno;
}
@@ -1052,6 +1025,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
@@ -1065,10 +1039,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- i915_user_irq_get(dev);
- DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
+ render_ring->user_irq_get(dev, render_ring);
+ DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
READ_BREADCRUMB(dev_priv) >= irq_nr);
- i915_user_irq_put(dev);
+ render_ring->user_irq_put(dev, render_ring);
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1087,7 +1061,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
drm_i915_irq_emit_t *emit = data;
int result;
- if (!dev_priv || !dev_priv->ring.virtual_start) {
+ if (!dev_priv || !dev_priv->render_ring.virtual_start) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -1233,9 +1207,12 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
-struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
+struct drm_i915_gem_request *
+i915_get_tail_request(struct drm_device *dev)
+{
drm_i915_private_t *dev_priv = dev->dev_private;
- return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
+ return list_entry(dev_priv->render_ring.request_list.prev,
+ struct drm_i915_gem_request, list);
}
/**
@@ -1260,8 +1237,10 @@ void i915_hangcheck_elapsed(unsigned long data)
acthd = I915_READ(ACTHD_I965);
/* If all work is done then ACTHD clearly hasn't advanced. */
- if (list_empty(&dev_priv->mm.request_list) ||
- i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
+ if (list_empty(&dev_priv->render_ring.request_list) ||
+ i915_seqno_passed(i915_get_gem_seqno(dev,
+ &dev_priv->render_ring),
+ i915_get_tail_request(dev)->seqno)) {
dev_priv->hangcheck_count = 0;
return;
}
@@ -1314,7 +1293,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
/* enable kind of interrupts always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
- u32 render_mask = GT_PIPE_NOTIFY;
+ u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
@@ -1328,7 +1307,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
(void) I915_READ(DEIER);
/* user interrupt should be enabled, but masked initial */
- dev_priv->gt_irq_mask_reg = 0xffffffff;
+ dev_priv->gt_irq_mask_reg = ~render_mask;
dev_priv->gt_irq_enable_reg = render_mask;
I915_WRITE(GTIIR, I915_READ(GTIIR));
@@ -1391,7 +1370,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
u32 error_mask;
- DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+ DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
+
+ if (HAS_BSD(dev))
+ DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f3e39cc46f0d..64b0a3afd92b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -334,6 +334,7 @@
#define I915_DEBUG_INTERRUPT (1<<2)
#define I915_USER_INTERRUPT (1<<1)
#define I915_ASLE_INTERRUPT (1<<0)
+#define I915_BSD_USER_INTERRUPT (1<<25)
#define EIR 0x020b0
#define EMR 0x020b4
#define ESR 0x020b8
@@ -368,6 +369,36 @@
#define BB_ADDR 0x02140 /* 8 bytes */
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
+/* GEN6 interrupt control */
+#define GEN6_RENDER_HWSTAM 0x2098
+#define GEN6_RENDER_IMR 0x20a8
+#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
+#define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7)
+#define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6)
+#define GEN6_RENDER_L3_PARITY_ERROR (1 << 5)
+#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4)
+#define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3)
+#define GEN6_RENDER_SYNC_STATUS (1 << 2)
+#define GEN6_RENDER_DEBUG_INTERRUPT (1 << 1)
+#define GEN6_RENDER_USER_INTERRUPT (1 << 0)
+
+#define GEN6_BLITTER_HWSTAM 0x22098
+#define GEN6_BLITTER_IMR 0x220a8
+#define GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT (1 << 26)
+#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25)
+#define GEN6_BLITTER_SYNC_STATUS (1 << 24)
+#define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
+/*
+ * BSD (bit stream decoder) instruction and interrupt control register defines
+ * (G4X and Ironlake only)
+ */
+
+#define BSD_RING_TAIL 0x04030
+#define BSD_RING_HEAD 0x04034
+#define BSD_RING_START 0x04038
+#define BSD_RING_CTL 0x0403c
+#define BSD_RING_ACTHD 0x04074
+#define BSD_HWS_PGA 0x04080
/*
* Framebuffer compression (915+ only)
@@ -805,6 +836,10 @@
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
+/** Pineview MCH register contains DDR3 setting */
+#define CSHRDDR3CTL 0x101a8
+#define CSHRDDR3CTL_DDR3 (1 << 2)
+
/** 965 MCH register controlling DRAM channel configuration */
#define C0DRB3 0x10206
#define C1DRB3 0x10606
@@ -826,6 +861,12 @@
#define CLKCFG_MEM_800 (3 << 4)
#define CLKCFG_MEM_MASK (7 << 4)
+#define TR1 0x11006
+#define TSFS 0x11020
+#define TSFS_SLOPE_MASK 0x0000ff00
+#define TSFS_SLOPE_SHIFT 8
+#define TSFS_INTR_MASK 0x000000ff
+
#define CRSTANDVID 0x11100
#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
#define PXVFREQ_PX_MASK 0x7f000000
@@ -964,6 +1005,41 @@
#define MEMSTAT_SRC_CTL_STDBY 3
#define RCPREVBSYTUPAVG 0x113b8
#define RCPREVBSYTDNAVG 0x113bc
+#define SDEW 0x1124c
+#define CSIEW0 0x11250
+#define CSIEW1 0x11254
+#define CSIEW2 0x11258
+#define PEW 0x1125c
+#define DEW 0x11270
+#define MCHAFE 0x112c0
+#define CSIEC 0x112e0
+#define DMIEC 0x112e4
+#define DDREC 0x112e8
+#define PEG0EC 0x112ec
+#define PEG1EC 0x112f0
+#define GFXEC 0x112f4
+#define RPPREVBSYTUPAVG 0x113b8
+#define RPPREVBSYTDNAVG 0x113bc
+#define ECR 0x11600
+#define ECR_GPFE (1<<31)
+#define ECR_IMONE (1<<30)
+#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
+#define OGW0 0x11608
+#define OGW1 0x1160c
+#define EG0 0x11610
+#define EG1 0x11614
+#define EG2 0x11618
+#define EG3 0x1161c
+#define EG4 0x11620
+#define EG5 0x11624
+#define EG6 0x11628
+#define EG7 0x1162c
+#define PXW 0x11664
+#define PXWL 0x11680
+#define LCFUSE02 0x116c0
+#define LCFUSE_HIV_MASK 0x000000ff
+#define CSIPLL0 0x12c10
+#define DDRMPLL1 0X12c20
#define PEG_BAND_GAP_DATA 0x14d68
/*
@@ -1055,7 +1131,6 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
-#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
#define PORT_HOTPLUG_STAT 0x61114
#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -2355,6 +2430,8 @@
#define GT_PIPE_NOTIFY (1 << 4)
#define GT_SYNC_STATUS (1 << 2)
#define GT_USER_INTERRUPT (1 << 0)
+#define GT_BSD_USER_INTERRUPT (1 << 5)
+
#define GTISR 0x44010
#define GTIMR 0x44014
@@ -2690,6 +2767,9 @@
#define SDVO_ENCODING (0)
#define TMDS_ENCODING (2 << 10)
#define NULL_PACKET_VSYNC_ENABLE (1 << 9)
+/* CPT */
+#define HDMI_MODE_SELECT (1 << 9)
+#define DVI_MODE_SELECT (0)
#define SDVOB_BORDER_ENABLE (1 << 7)
#define AUDIO_ENABLE (1 << 6)
#define VSYNC_ACTIVE_HIGH (1 << 4)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 9e4c45f68d6e..fab21760dd57 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -53,23 +53,6 @@ TRACE_EVENT(i915_gem_object_bind,
__entry->obj, __entry->gtt_offset)
);
-TRACE_EVENT(i915_gem_object_clflush,
-
- TP_PROTO(struct drm_gem_object *obj),
-
- TP_ARGS(obj),
-
- TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
- ),
-
- TP_fast_assign(
- __entry->obj = obj;
- ),
-
- TP_printk("obj=%p", __entry->obj)
-);
-
TRACE_EVENT(i915_gem_object_change_domain,
TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
@@ -132,6 +115,13 @@ DECLARE_EVENT_CLASS(i915_gem_object,
TP_printk("obj=%p", __entry->obj)
);
+DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
+
+ TP_PROTO(struct drm_gem_object *obj),
+
+ TP_ARGS(obj)
+);
+
DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
TP_PROTO(struct drm_gem_object *obj),
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4c748d8f73d6..96f75d7f6633 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -95,6 +95,16 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
panel_fixed_mode->clock = dvo_timing->clock * 10;
panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+ if (dvo_timing->hsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dvo_timing->vsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
/* Some VBTs have bogus h/vtotal values */
if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e16ac5a28c3c..22ff38455731 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -217,7 +217,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 hotplug_en;
+ u32 hotplug_en, orig, stat;
+ bool ret = false;
int i, tries = 0;
if (HAS_PCH_SPLIT(dev))
@@ -232,8 +233,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
tries = 2;
else
tries = 1;
- hotplug_en = I915_READ(PORT_HOTPLUG_EN);
- hotplug_en &= CRT_FORCE_HOTPLUG_MASK;
+ hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
+ hotplug_en &= CRT_HOTPLUG_MASK;
hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
if (IS_G4X(dev))
@@ -255,11 +256,17 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
} while (time_after(timeout, jiffies));
}
- if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
- CRT_HOTPLUG_MONITOR_NONE)
- return true;
+ stat = I915_READ(PORT_HOTPLUG_STAT);
+ if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
+ ret = true;
+
+ /* clear the interrupt we just generated, if any */
+ I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
- return false;
+ /* and put the bits back */
+ I915_WRITE(PORT_HOTPLUG_EN, orig);
+
+ return ret;
}
static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
@@ -569,7 +576,7 @@ void intel_crt_init(struct drm_device *dev)
(1 << INTEL_ANALOG_CLONE_BIT) |
(1 << INTEL_SDVO_LVDS_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
- connector->interlace_allowed = 0;
+ connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f469a84cacfd..cc8131ff319f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1029,19 +1029,28 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1);
u32 fbc_ctl;
if (!I915_HAS_FBC(dev))
return;
+ if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN))
+ return; /* Already off, just return */
+
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
fbc_ctl &= ~FBC_CTL_EN;
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING)
- ; /* nothing */
+ while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) {
+ if (time_after(jiffies, timeout)) {
+ DRM_DEBUG_DRIVER("FBC idle timed out\n");
+ break;
+ }
+ ; /* do nothing */
+ }
intel_wait_for_vblank(dev);
@@ -1239,10 +1248,11 @@ static void intel_update_fbc(struct drm_crtc *crtc,
return;
out_disable:
- DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
/* Multiple disables should be harmless */
- if (intel_fbc_enabled(dev))
+ if (intel_fbc_enabled(dev)) {
+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
intel_disable_fbc(dev);
+ }
}
static int
@@ -1386,7 +1396,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
Start = obj_priv->gtt_offset;
Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ Start, Offset, x, y, crtc->fb->pitch);
I915_WRITE(dspstride, crtc->fb->pitch);
if (IS_I965G(dev)) {
I915_WRITE(dspbase, Offset);
@@ -2345,6 +2356,8 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
if (mode->clock * 3 > 27000 * 4)
return MODE_CLOCK_HIGH;
}
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}
@@ -2629,6 +2642,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
struct cxsr_latency {
int is_desktop;
+ int is_ddr3;
unsigned long fsb_freq;
unsigned long mem_freq;
unsigned long display_sr;
@@ -2638,33 +2652,45 @@ struct cxsr_latency {
};
static struct cxsr_latency cxsr_latency_table[] = {
- {1, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
- {1, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
- {1, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
-
- {1, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
- {1, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
- {1, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
-
- {1, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
- {1, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
- {1, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
-
- {0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
- {0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
- {0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
-
- {0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
- {0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
- {0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
-
- {0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
- {0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
- {0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+ {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
+ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
+ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
+ {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
+ {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
+
+ {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
+ {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
+ {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
+ {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
+ {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
+
+ {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
+ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
+ {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
+ {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
+ {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
+
+ {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
+ {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
+ {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
+ {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
+ {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
+
+ {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
+ {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
+ {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
+ {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
+ {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
+
+ {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
+ {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
+ {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+ {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
+ {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
- int mem)
+static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3,
+ int fsb, int mem)
{
int i;
struct cxsr_latency *latency;
@@ -2675,6 +2701,7 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
latency = &cxsr_latency_table[i];
if (is_desktop == latency->is_desktop &&
+ is_ddr3 == latency->is_ddr3 &&
fsb == latency->fsb_freq && mem == latency->mem_freq)
return latency;
}
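/*
 * Lookup sketch: with the DDR3 column added, a mobile (is_desktop == 0)
 * part with a 667 MHz FSB and DDR3-800 memory resolves to the
 * {0, 1, 667, 800, 5998, 35998, 6460, 36460} row above, while the same
 * FSB/memory speeds on DDR2 still hit {0, 0, 667, 800, ...}; the two
 * memory types no longer share self-refresh latencies.
 */
	struct cxsr_latency *latency =
		intel_get_cxsr_latency(0 /* mobile */, 1 /* DDR3 */, 667, 800);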
@@ -2789,8 +2816,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
struct cxsr_latency *latency;
int sr_clock;
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
- dev_priv->mem_freq);
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+ dev_priv->fsb_freq, dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
pineview_disable_cxsr(dev);
@@ -3626,6 +3653,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
}
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+ pipeconf |= PIPEACONF_ENABLE;
+ dpll |= DPLL_VCO_ENABLE;
+
+
/* Disable the panel fitter if it was on our pipe */
if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
I915_WRITE(PFIT_CONTROL, 0);
@@ -3772,6 +3804,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
}
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+ /* the chip adds 2 halflines automatically */
+ adjusted_mode->crtc_vdisplay -= 1;
+ adjusted_mode->crtc_vtotal -= 1;
+ adjusted_mode->crtc_vblank_start -= 1;
+ adjusted_mode->crtc_vblank_end -= 1;
+ adjusted_mode->crtc_vsync_end -= 1;
+ adjusted_mode->crtc_vsync_start -= 1;
+ } else
+ pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
+
I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
@@ -3934,6 +3978,13 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
DRM_ERROR("failed to pin cursor bo\n");
goto fail_locked;
}
+
+ ret = i915_gem_object_set_to_gtt_domain(bo, 0);
+ if (ret) {
+ DRM_ERROR("failed to move cursor bo into the GTT\n");
+ goto fail_unpin;
+ }
+
addr = obj_priv->gtt_offset;
} else {
ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
@@ -3977,6 +4028,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
intel_crtc->cursor_bo = bo;
return 0;
+fail_unpin:
+ i915_gem_object_unpin(bo);
fail_locked:
mutex_unlock(&dev->struct_mutex);
fail:
@@ -4436,6 +4489,8 @@ static void intel_idle_update(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
+ i915_update_gfx_val(dev_priv);
+
if (IS_I945G(dev) || IS_I945GM(dev)) {
DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
@@ -4564,12 +4619,6 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
- if (work && !work->pending) {
- obj_priv = to_intel_bo(work->pending_flip_obj);
- DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
- obj_priv,
- atomic_read(&obj_priv->pending_flip));
- }
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
@@ -4629,14 +4678,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
unsigned long flags;
int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
int ret, pipesrc;
- RING_LOCALS;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
- mutex_lock(&dev->struct_mutex);
-
work->event = event;
work->dev = crtc->dev;
intel_fb = to_intel_framebuffer(crtc->fb);
@@ -4646,10 +4692,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
- DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
- mutex_unlock(&dev->struct_mutex);
+
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
}
intel_crtc->unpin_work = work;
@@ -4658,13 +4704,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
+ mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev, obj);
if (ret != 0) {
- DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
- to_intel_bo(obj));
- kfree(work);
- intel_crtc->unpin_work = NULL;
mutex_unlock(&dev->struct_mutex);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ intel_crtc->unpin_work = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ kfree(work);
+
+ DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
+ to_intel_bo(obj));
return ret;
}
@@ -5023,10 +5075,32 @@ err_unref:
return NULL;
}
+bool ironlake_set_drps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl;
+
+ rgvswctl = I915_READ16(MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_DEBUG("gpu busy, RCS change rejected\n");
+ return false; /* still busy with another command */
+ }
+
+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+ POSTING_READ16(MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+
+ return true;
+}
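/*
 * Usage sketch: the frequency-change paths in this patch (the PCU event
 * handler, ironlake_enable_drps() and ironlake_disable_drps()) now funnel
 * through ironlake_set_drps() instead of open-coding the MEMSWCTL write.
 * The interrupt path only commits the new delay when the write is accepted:
 *
 *	if (ironlake_set_drps(dev, new_delay))
 *		dev_priv->cur_delay = new_delay;
 */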
+
void ironlake_enable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl;
+ u32 rgvmodectl = I915_READ(MEMMODECTL);
u8 fmax, fmin, fstart, vstart;
int i = 0;
@@ -5045,13 +5119,21 @@ void ironlake_enable_drps(struct drm_device *dev)
fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
+ fstart = fmax;
+
vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
- dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */
+ dev_priv->fmax = fstart; /* IPS callback will increase this */
+ dev_priv->fstart = fstart;
+
+ dev_priv->max_delay = fmax;
dev_priv->min_delay = fmin;
dev_priv->cur_delay = fstart;
+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
+ fstart);
+
I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
/*
@@ -5073,20 +5155,19 @@ void ironlake_enable_drps(struct drm_device *dev)
}
msleep(1);
- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
- (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
- I915_WRITE(MEMSWCTL, rgvswctl);
- POSTING_READ(MEMSWCTL);
+ ironlake_set_drps(dev, fstart);
- rgvswctl |= MEMCTL_CMD_STS;
- I915_WRITE(MEMSWCTL, rgvswctl);
+ dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+ I915_READ(0x112e0);
+ dev_priv->last_time1 = jiffies_to_msecs(jiffies);
+ dev_priv->last_count2 = I915_READ(0x112f4);
+ getrawmonotonic(&dev_priv->last_time2);
}
void ironlake_disable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rgvswctl;
- u8 fstart;
+ u16 rgvswctl = I915_READ16(MEMSWCTL);
/* Ack interrupts, disable EFC interrupt */
I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
@@ -5096,11 +5177,7 @@ void ironlake_disable_drps(struct drm_device *dev)
I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
/* Go back to the starting frequency */
- fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >>
- MEMMODE_FSTART_SHIFT;
- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
- (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
- I915_WRITE(MEMSWCTL, rgvswctl);
+ ironlake_set_drps(dev, dev_priv->fstart);
msleep(1);
rgvswctl |= MEMCTL_CMD_STS;
I915_WRITE(MEMSWCTL, rgvswctl);
@@ -5108,6 +5185,92 @@ void ironlake_disable_drps(struct drm_device *dev)
}
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+ unsigned long freq;
+ int div = (vidfreq & 0x3f0000) >> 16;
+ int post = (vidfreq & 0x3000) >> 12;
+ int pre = (vidfreq & 0x7);
+
+ if (!pre)
+ return 0;
+
+ freq = ((div * 133333) / ((1<<post) * pre));
+
+ return freq;
+}
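
The helper above just unpacks three divider fields from a PXVFREQ register and turns them into a frequency, freq = (div * 133333) / ((1 << post) * pre), returning 0 for an unprogrammed slot. A minimal standalone sketch of that arithmetic, with the raw register value invented for illustration (not part of the patch):

#include <stdio.h>

/* Illustrative only -- mirrors the field decoding in intel_pxfreq(). */
static unsigned long pxfreq(unsigned int vidfreq)
{
	int div  = (vidfreq & 0x3f0000) >> 16;	/* divider field */
	int post = (vidfreq & 0x3000) >> 12;	/* post divider, power of two */
	int pre  = (vidfreq & 0x7);		/* pre divider */

	if (!pre)
		return 0;			/* unprogrammed P-state slot */

	return (div * 133333) / ((1 << post) * pre);
}

int main(void)
{
	/* hypothetical raw value: div = 0x14, post = 1, pre = 1 */
	unsigned int raw = (0x14 << 16) | (1 << 12) | 0x1;

	printf("decoded frequency: %lu\n", pxfreq(raw));
	return 0;
}
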
+
+void intel_init_emon(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 lcfuse;
+ u8 pxw[16];
+ int i;
+
+ /* Disable to program */
+ I915_WRITE(ECR, 0);
+ POSTING_READ(ECR);
+
+ /* Program energy weights for various events */
+ I915_WRITE(SDEW, 0x15040d00);
+ I915_WRITE(CSIEW0, 0x007f0000);
+ I915_WRITE(CSIEW1, 0x1e220004);
+ I915_WRITE(CSIEW2, 0x04000004);
+
+ for (i = 0; i < 5; i++)
+ I915_WRITE(PEW + (i * 4), 0);
+ for (i = 0; i < 3; i++)
+ I915_WRITE(DEW + (i * 4), 0);
+
+ /* Program P-state weights to account for frequency power adjustment */
+ for (i = 0; i < 16; i++) {
+ u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+ unsigned long freq = intel_pxfreq(pxvidfreq);
+ unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+ unsigned long val;
+
+ val = vid * vid;
+ val *= (freq / 1000);
+ val *= 255;
+ val /= (127*127*900);
+ if (val > 0xff)
+ DRM_ERROR("bad pxval: %ld\n", val);
+ pxw[i] = val;
+ }
+ /* Render standby states get 0 weight */
+ pxw[14] = 0;
+ pxw[15] = 0;
+
+ for (i = 0; i < 4; i++) {
+ u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
+ (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
+ I915_WRITE(PXW + (i * 4), val);
+ }
+
+ /* Adjust magic regs to magic values (more experimental results) */
+ I915_WRITE(OGW0, 0);
+ I915_WRITE(OGW1, 0);
+ I915_WRITE(EG0, 0x00007f00);
+ I915_WRITE(EG1, 0x0000000e);
+ I915_WRITE(EG2, 0x000e0000);
+ I915_WRITE(EG3, 0x68000300);
+ I915_WRITE(EG4, 0x42000000);
+ I915_WRITE(EG5, 0x00140031);
+ I915_WRITE(EG6, 0);
+ I915_WRITE(EG7, 0);
+
+ for (i = 0; i < 8; i++)
+ I915_WRITE(PXWL + (i * 4), 0);
+
+ /* Enable PMON + select events */
+ I915_WRITE(ECR, 0x80000019);
+
+ lcfuse = I915_READ(LCFUSE02);
+
+ dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
+}
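
The per-P-state weight written to the PXW registers above scales as voltage squared times frequency, normalised so a reference operating point lands near the top of a byte: val = vid*vid * (freq/1000) * 255 / (127*127*900). A quick standalone re-run of that arithmetic with made-up vid/freq numbers (illustrative only):

#include <stdio.h>

int main(void)
{
	/* Invented inputs, same arithmetic as the loop in intel_init_emon(). */
	unsigned long vid  = 100;	/* hypothetical voltage ID */
	unsigned long freq = 400000;	/* hypothetical frequency from intel_pxfreq() */
	unsigned long val;

	val  = vid * vid;		/* voltage squared */
	val *= freq / 1000;
	val *= 255;			/* scale into a byte-sized weight */
	val /= 127 * 127 * 900;		/* normalise against the reference point */

	if (val > 0xff)
		printf("bad pxval: %lu\n", val);	/* driver only warns here */
	else
		printf("pxw byte: %lu\n", val);
	return 0;
}
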
+
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5277,11 +5440,13 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
} else if (IS_PINEVIEW(dev)) {
if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq)) {
DRM_INFO("failed to find known CxSR latency "
- "(found fsb freq %d, mem freq %d), "
+ "(found ddr%s fsb freq %d, mem freq %d), "
"disabling CxSR\n",
+ (dev_priv->is_ddr3 == 1) ? "3": "2",
dev_priv->fsb_freq, dev_priv->mem_freq);
/* Disable CxSR and never update its watermark again */
pineview_disable_cxsr(dev);
@@ -5310,7 +5475,6 @@ static void intel_init_display(struct drm_device *dev)
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int num_pipe;
int i;
drm_mode_config_init(dev);
@@ -5340,13 +5504,13 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
if (IS_MOBILE(dev) || IS_I9XX(dev))
- num_pipe = 2;
+ dev_priv->num_pipe = 2;
else
- num_pipe = 1;
+ dev_priv->num_pipe = 1;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
- num_pipe, num_pipe > 1 ? "s" : "");
+ dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
- for (i = 0; i < num_pipe; i++) {
+ for (i = 0; i < dev_priv->num_pipe; i++) {
intel_crtc_init(dev, i);
}
@@ -5354,8 +5518,10 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_clock_gating(dev);
- if (IS_IRONLAKE_M(dev))
+ if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
+ intel_init_emon(dev);
+ }
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6b1c9a27c27a..49b54f05d3cf 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -576,7 +576,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct intel_encoder *intel_encoder;
struct intel_dp_priv *dp_priv;
- if (!encoder || encoder->crtc != crtc)
+ if (encoder->crtc != crtc)
continue;
intel_encoder = enc_to_intel_encoder(encoder);
@@ -675,10 +675,9 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
dp_priv->link_configuration[1] = dp_priv->lane_count;
/*
- * Check for DPCD version > 1.1,
- * enable enahanced frame stuff in that case
+ * Check for DPCD version > 1.1 and enhanced framing support
*/
- if (dp_priv->dpcd[0] >= 0x11) {
+ if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
dp_priv->DP |= DP_ENHANCED_FRAMING;
}
@@ -1208,6 +1207,8 @@ ironlake_dp_detect(struct drm_connector *connector)
if (dp_priv->dpcd[0] != 0)
status = connector_status_connected;
}
+ DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0],
+ dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]);
return status;
}
@@ -1352,7 +1353,7 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc)
struct intel_encoder *intel_encoder = NULL;
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- if (!encoder || encoder->crtc != crtc)
+ if (encoder->crtc != crtc)
continue;
intel_encoder = enc_to_intel_encoder(encoder);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 6f53cf7fbc50..c3c505244e07 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -105,7 +105,11 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
}
/* Flush everything out, we'll be doing GTT only from now on */
- i915_gem_object_set_to_gtt_domain(fbo, 1);
+ ret = i915_gem_object_set_to_gtt_domain(fbo, 1);
+ if (ret) {
+ DRM_ERROR("failed to bind fb: %d.\n", ret);
+ goto out_unpin;
+ }
info = framebuffer_alloc(0, device);
if (!info) {
@@ -241,6 +245,7 @@ int intel_fbdev_init(struct drm_device *dev)
{
struct intel_fbdev *ifbdev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
if (!ifbdev)
@@ -249,8 +254,13 @@ int intel_fbdev_init(struct drm_device *dev)
dev_priv->fbdev = ifbdev;
ifbdev->helper.funcs = &intel_fb_helper_funcs;
- drm_fb_helper_init(dev, &ifbdev->helper, 2,
- INTELFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(dev, &ifbdev->helper,
+ dev_priv->num_pipe,
+ INTELFB_CONN_LIMIT);
+ if (ret) {
+ kfree(ifbdev);
+ return ret;
+ }
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
drm_fb_helper_initial_config(&ifbdev->helper, 32);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 65727f0a79a3..83bd764b000e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -59,8 +59,11 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
SDVO_VSYNC_ACTIVE_HIGH |
SDVO_HSYNC_ACTIVE_HIGH;
- if (hdmi_priv->has_hdmi_sink)
+ if (hdmi_priv->has_hdmi_sink) {
sdvox |= SDVO_AUDIO_ENABLE;
+ if (HAS_PCH_CPT(dev))
+ sdvox |= HDMI_MODE_SELECT;
+ }
if (intel_crtc->pipe == 1) {
if (HAS_PCH_CPT(dev))
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index b0e17b06eb6e..d7ad5139d17c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -211,9 +211,8 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- RING_LOCALS;
+ drm_i915_private_t *dev_priv = dev->dev_private;
BUG_ON(overlay->active);
@@ -227,11 +226,13 @@ static int intel_overlay_on(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ ret = i915_do_wait_request(dev,
+ overlay->last_flip_req, 1, &dev_priv->render_ring);
if (ret != 0)
return ret;
@@ -248,7 +249,6 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
drm_i915_private_t *dev_priv = dev->dev_private;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
- RING_LOCALS;
BUG_ON(!overlay->active);
@@ -265,7 +265,8 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
OUT_RING(flip_addr);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
}
static int intel_overlay_wait_flip(struct intel_overlay *overlay)
@@ -274,10 +275,10 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
u32 tmp;
- RING_LOCALS;
if (overlay->last_flip_req != 0) {
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ 1, &dev_priv->render_ring);
if (ret == 0) {
overlay->last_flip_req = 0;
@@ -296,11 +297,13 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ 1, &dev_priv->render_ring);
if (ret != 0)
return ret;
@@ -314,9 +317,8 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{
u32 flip_addr = overlay->flip_addr;
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- RING_LOCALS;
BUG_ON(!overlay->active);
@@ -336,11 +338,13 @@ static int intel_overlay_off(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ 1, &dev_priv->render_ring);
if (ret != 0)
return ret;
@@ -354,11 +358,13 @@ static int intel_overlay_off(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ 1, &dev_priv->render_ring);
if (ret != 0)
return ret;
@@ -390,22 +396,23 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
int interruptible)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
+ drm_i915_private_t *dev_priv = dev->dev_private;
u32 flip_addr;
int ret;
- RING_LOCALS;
if (overlay->hw_wedged == HW_WEDGED)
return -EIO;
if (overlay->last_flip_req == 0) {
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
}
- ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ interruptible, &dev_priv->render_ring);
if (ret != 0)
return ret;
@@ -429,12 +436,13 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req = i915_add_request(dev, NULL,
+ 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
ret = i915_do_wait_request(dev, overlay->last_flip_req,
- interruptible);
+ interruptible, &dev_priv->render_ring);
if (ret != 0)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
new file mode 100644
index 000000000000..cea4f1a8709e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -0,0 +1,849 @@
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Zou Nan hai <nanhai.zou@intel.com>
+ * Xiang Hai hao<haihao.xiang@intel.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drv.h"
+#include "i915_drm.h"
+#include "i915_trace.h"
+
+static void
+render_ring_flush(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
+ invalidate_domains, flush_domains);
+#endif
+ u32 cmd;
+ trace_i915_gem_request_flush(dev, ring->next_seqno,
+ invalidate_domains, flush_domains);
+
+ if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
+ /*
+ * read/write caches:
+ *
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * I915_GEM_DOMAIN_COMMAND may not exist?
+ *
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+ * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
+
+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ if ((invalidate_domains|flush_domains) &
+ I915_GEM_DOMAIN_RENDER)
+ cmd &= ~MI_NO_WRITE_FLUSH;
+ if (!IS_I965G(dev)) {
+ /*
+ * On the 965, the sampler cache always gets flushed
+ * and this bit is reserved.
+ */
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+ }
+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+ cmd |= MI_EXE_FLUSH;
+
+#if WATCH_EXEC
+ DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
+#endif
+ intel_ring_begin(dev, ring, 8);
+ intel_ring_emit(dev, ring, cmd);
+ intel_ring_emit(dev, ring, MI_NOOP);
+ intel_ring_advance(dev, ring);
+ }
+}
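
render_ring_flush() builds a single MI_FLUSH-style command word and then sets or clears optional bits depending on which GEM domains are being invalidated or flushed. A hedged standalone sketch of just that bit-assembly pattern follows; the bit values are placeholders for illustration, not the real MI_* or I915_GEM_DOMAIN_* encodings:

#include <stdio.h>

/* Illustrative only -- placeholder bit values, same pattern as
 * render_ring_flush(): start from a base flush command and adjust
 * optional bits per dirty domain. */
#define FLUSH_BASE		(1u << 0)	/* stands in for MI_FLUSH */
#define NO_WRITE_FLUSH		(1u << 1)	/* skip the render write flush */
#define READ_FLUSH		(1u << 2)	/* invalidate sampler reads */
#define EXE_FLUSH		(1u << 3)	/* invalidate instruction cache */

#define DOMAIN_RENDER		(1u << 8)
#define DOMAIN_SAMPLER		(1u << 9)
#define DOMAIN_INSTRUCTION	(1u << 10)

static unsigned int build_flush_cmd(unsigned int invalidate, unsigned int flush)
{
	unsigned int cmd = FLUSH_BASE | NO_WRITE_FLUSH;

	if ((invalidate | flush) & DOMAIN_RENDER)
		cmd &= ~NO_WRITE_FLUSH;	/* render writes must reach memory */
	if (invalidate & DOMAIN_SAMPLER)
		cmd |= READ_FLUSH;	/* drop stale texture reads */
	if (invalidate & DOMAIN_INSTRUCTION)
		cmd |= EXE_FLUSH;	/* drop stale instructions */
	return cmd;
}

int main(void)
{
	printf("cmd = 0x%x\n", build_flush_cmd(DOMAIN_SAMPLER, DOMAIN_RENDER));
	return 0;
}
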
+
+static unsigned int render_ring_get_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(PRB0_HEAD) & HEAD_ADDR;
+}
+
+static unsigned int render_ring_get_tail(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(PRB0_TAIL) & TAIL_ADDR;
+}
+
+static unsigned int render_ring_get_active_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+
+ return I915_READ(acthd_reg);
+}
+
+static void render_ring_advance_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ I915_WRITE(PRB0_TAIL, ring->tail);
+}
+
+static int init_ring_common(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ u32 head;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ obj_priv = to_intel_bo(ring->gem_object);
+
+ /* Stop the ring if it's running. */
+ I915_WRITE(ring->regs.ctl, 0);
+ I915_WRITE(ring->regs.head, 0);
+ I915_WRITE(ring->regs.tail, 0);
+
+ /* Initialize the ring. */
+ I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
+ head = ring->get_head(dev, ring);
+
+ /* G45 ring initialization fails to reset head to zero */
+ if (head != 0) {
+ DRM_ERROR("%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ(ring->regs.ctl),
+ I915_READ(ring->regs.head),
+ I915_READ(ring->regs.tail),
+ I915_READ(ring->regs.start));
+
+ I915_WRITE(ring->regs.head, 0);
+
+ DRM_ERROR("%s head forced to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ(ring->regs.ctl),
+ I915_READ(ring->regs.head),
+ I915_READ(ring->regs.tail),
+ I915_READ(ring->regs.start));
+ }
+
+ I915_WRITE(ring->regs.ctl,
+ ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+ | RING_NO_REPORT | RING_VALID);
+
+ head = I915_READ(ring->regs.head) & HEAD_ADDR;
+ /* If the head is still not zero, the ring is dead */
+ if (head != 0) {
+ DRM_ERROR("%s initialization failed "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ(ring->regs.ctl),
+ I915_READ(ring->regs.head),
+ I915_READ(ring->regs.tail),
+ I915_READ(ring->regs.start));
+ return -EIO;
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_kernel_lost_context(dev);
+ else {
+ ring->head = ring->get_head(dev, ring);
+ ring->tail = ring->get_tail(dev, ring);
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->size;
+ }
+ return 0;
+}
+
+static int init_render_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret = init_ring_common(dev, ring);
+ if (IS_I9XX(dev) && !IS_GEN3(dev)) {
+ I915_WRITE(MI_MODE,
+ (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+ }
+ return ret;
+}
+
+#define PIPE_CONTROL_FLUSH(addr) \
+do { \
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
+ PIPE_CONTROL_DEPTH_STALL | 2); \
+ OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
+ OUT_RING(0); \
+ OUT_RING(0); \
+} while (0)
+
+/**
+ * Creates a new sequence number, emitting a write of it to the status page
+ * plus an interrupt, which will trigger i915_user_interrupt_handler.
+ *
+ * Must be called with struct_lock held.
+ *
+ * Returned sequence numbers are nonzero on success.
+ */
+static u32
+render_ring_add_request(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_file *file_priv,
+ u32 flush_domains)
+{
+ u32 seqno;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ seqno = intel_ring_get_seqno(dev, ring);
+
+ if (IS_GEN6(dev)) {
+ BEGIN_LP_RING(6);
+ OUT_RING(GFX_OP_PIPE_CONTROL | 3);
+ OUT_RING(PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
+ PIPE_CONTROL_NOTIFY);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else if (HAS_PIPE_CONTROL(dev)) {
+ u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
+
+ /*
+ * Workaround qword write incoherence by flushing the
+ * PIPE_NOTIFY buffers out to memory before requesting
+ * an interrupt.
+ */
+ BEGIN_LP_RING(32);
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128; /* write to separate cachelines */
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+ PIPE_CONTROL_NOTIFY);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(seqno);
+
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
+ return seqno;
+}
+
+static u32
+render_ring_get_gem_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ if (HAS_PIPE_CONTROL(dev))
+ return ((volatile u32 *)(dev_priv->seqno_page))[0];
+ else
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void
+render_ring_get_user_irq(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+ else
+ i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+ }
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+}
+
+static void
+render_ring_put_user_irq(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
+ if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+ else
+ i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+ }
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+}
+
+static void render_setup_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (IS_GEN6(dev)) {
+ I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
+ I915_READ(HWS_PGA_GEN6); /* posting read */
+ } else {
+ I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
+ I915_READ(HWS_PGA); /* posting read */
+ }
+
+}
+
+void
+bsd_ring_flush(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ intel_ring_begin(dev, ring, 8);
+ intel_ring_emit(dev, ring, MI_FLUSH);
+ intel_ring_emit(dev, ring, MI_NOOP);
+ intel_ring_advance(dev, ring);
+}
+
+static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
+}
+
+static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
+}
+
+static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(BSD_RING_ACTHD);
+}
+
+static inline void bsd_ring_advance_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ I915_WRITE(BSD_RING_TAIL, ring->tail);
+}
+
+static int init_bsd_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ return init_ring_common(dev, ring);
+}
+
+static u32
+bsd_ring_add_request(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_file *file_priv,
+ u32 flush_domains)
+{
+ u32 seqno;
+ seqno = intel_ring_get_seqno(dev, ring);
+ intel_ring_begin(dev, ring, 4);
+ intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(dev, ring,
+ I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(dev, ring, seqno);
+ intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+ intel_ring_advance(dev, ring);
+
+ DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+
+ return seqno;
+}
+
+static void bsd_setup_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
+ I915_READ(BSD_HWS_PGA);
+}
+
+static void
+bsd_ring_get_user_irq(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ /* do nothing */
+}
+static void
+bsd_ring_put_user_irq(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ /* do nothing */
+}
+
+static u32
+bsd_ring_get_gem_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static int
+bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset)
+{
+ uint32_t exec_start;
+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+ intel_ring_begin(dev, ring, 2);
+ intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
+ (2 << 6) | MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(dev, ring, exec_start);
+ intel_ring_advance(dev, ring);
+ return 0;
+}
+
+
+static int
+render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int nbox = exec->num_cliprects;
+ int i = 0, count;
+ uint32_t exec_start, exec_len;
+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+ exec_len = (uint32_t) exec->batch_len;
+
+ trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
+
+ count = nbox ? nbox : 1;
+
+ for (i = 0; i < count; i++) {
+ if (i < nbox) {
+ int ret = i915_emit_box(dev, cliprects, i,
+ exec->DR1, exec->DR4);
+ if (ret)
+ return ret;
+ }
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ intel_ring_begin(dev, ring, 4);
+ intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
+ intel_ring_emit(dev, ring,
+ exec_start | MI_BATCH_NON_SECURE);
+ intel_ring_emit(dev, ring, exec_start + exec_len - 4);
+ intel_ring_emit(dev, ring, 0);
+ } else {
+ intel_ring_begin(dev, ring, 4);
+ if (IS_I965G(dev)) {
+ intel_ring_emit(dev, ring,
+ MI_BATCH_BUFFER_START | (2 << 6)
+ | MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(dev, ring, exec_start);
+ } else {
+ intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
+ | (2 << 6));
+ intel_ring_emit(dev, ring, exec_start |
+ MI_BATCH_NON_SECURE);
+ }
+ }
+ intel_ring_advance(dev, ring);
+ }
+
+ /* XXX breadcrumb */
+ return 0;
+}
+
+static void cleanup_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj = ring->status_page.obj;
+ if (obj == NULL)
+ return;
+ obj_priv = to_intel_bo(obj);
+
+ kunmap(obj_priv->pages[0]);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ ring->status_page.obj = NULL;
+
+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+}
+
+static int init_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate status page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ obj_priv = to_intel_bo(obj);
+ obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+ ret = i915_gem_object_pin(obj, 4096);
+ if (ret != 0) {
+ goto err_unref;
+ }
+
+ ring->status_page.gfx_addr = obj_priv->gtt_offset;
+ ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+ if (ring->status_page.page_addr == NULL) {
+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+ goto err_unpin;
+ }
+ ring->status_page.obj = obj;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ ring->setup_status_page(dev, ring);
+ DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+ ring->name, ring->status_page.gfx_addr);
+
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(obj);
+err:
+ return ret;
+}
+
+
+int intel_init_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ int ret;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ ring->dev = dev;
+
+ if (I915_NEED_GFX_HWS(dev)) {
+ ret = init_status_page(dev, ring);
+ if (ret)
+ return ret;
+ }
+
+ obj = i915_gem_alloc_object(dev, ring->size);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate ringbuffer\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ring->gem_object = obj;
+
+ ret = i915_gem_object_pin(obj, ring->alignment);
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ goto cleanup;
+ }
+
+ obj_priv = to_intel_bo(obj);
+ ring->map.size = ring->size;
+ ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+ ring->map.type = 0;
+ ring->map.flags = 0;
+ ring->map.mtrr = 0;
+
+ drm_core_ioremap_wc(&ring->map, dev);
+ if (ring->map.handle == NULL) {
+ DRM_ERROR("Failed to map ringbuffer.\n");
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ring->virtual_start = ring->map.handle;
+ ret = ring->init(dev, ring);
+ if (ret != 0) {
+ intel_cleanup_ring_buffer(dev, ring);
+ return ret;
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_kernel_lost_context(dev);
+ else {
+ ring->head = ring->get_head(dev, ring);
+ ring->tail = ring->get_tail(dev, ring);
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->size;
+ }
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ return ret;
+cleanup:
+ cleanup_status_page(dev, ring);
+ return ret;
+}
+
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ if (ring->gem_object == NULL)
+ return;
+
+ drm_core_ioremapfree(&ring->map, dev);
+
+ i915_gem_object_unpin(ring->gem_object);
+ drm_gem_object_unreference(ring->gem_object);
+ ring->gem_object = NULL;
+ cleanup_status_page(dev, ring);
+}
+
+int intel_wrap_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ unsigned int *virt;
+ int rem;
+ rem = ring->size - ring->tail;
+
+ if (ring->space < rem) {
+ int ret = intel_wait_ring_buffer(dev, ring, rem);
+ if (ret)
+ return ret;
+ }
+
+ virt = (unsigned int *)(ring->virtual_start + ring->tail);
+ rem /= 4;
+ while (rem--)
+ *virt++ = MI_NOOP;
+
+ ring->tail = 0;
+
+ return 0;
+}
+
+int intel_wait_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring, int n)
+{
+ unsigned long end;
+
+ trace_i915_ring_wait_begin (dev);
+ end = jiffies + 3 * HZ;
+ do {
+ ring->head = ring->get_head(dev, ring);
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->size;
+ if (ring->space >= n) {
+ trace_i915_ring_wait_end (dev);
+ return 0;
+ }
+
+ if (dev->primary->master) {
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+ }
+
+ yield();
+ } while (!time_after(jiffies, end));
+ trace_i915_ring_wait_end (dev);
+ return -EBUSY;
+}
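
Both the init and wait paths above compute free space the same way: space = head - (tail + 8), wrapped into the ring size when negative. The 8 bytes of slack keep tail from ever catching head exactly, so an empty ring never looks full. A small standalone illustration of that rule, with values invented for the example:

#include <stdio.h>

/* Illustrative only -- the free-space rule used by init_ring_common()
 * and intel_wait_ring_buffer(). */
static int ring_space(unsigned int head, unsigned int tail, unsigned int size)
{
	int space = (int)head - (int)(tail + 8);

	if (space < 0)
		space += size;		/* wrap around the circular buffer */
	return space;
}

int main(void)
{
	unsigned int size = 32 * 4096;	/* 32 pages, as in render_ring/bsd_ring */

	printf("%d\n", ring_space(0, 0, size));		/* empty: size - 8 */
	printf("%d\n", ring_space(4096, 8192, size));	/* tail past head */
	return 0;
}
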
+
+void intel_ring_begin(struct drm_device *dev,
+ struct intel_ring_buffer *ring, int n)
+{
+ if (unlikely(ring->tail + n > ring->size))
+ intel_wrap_ring_buffer(dev, ring);
+ if (unlikely(ring->space < n))
+ intel_wait_ring_buffer(dev, ring, n);
+}
+
+void intel_ring_emit(struct drm_device *dev,
+ struct intel_ring_buffer *ring, unsigned int data)
+{
+ unsigned int *virt = ring->virtual_start + ring->tail;
+ *virt = data;
+ ring->tail += 4;
+ ring->tail &= ring->size - 1;
+ ring->space -= 4;
+}
+
+void intel_ring_advance(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ ring->advance_ring(dev, ring);
+}
+
+void intel_fill_struct(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ void *data,
+ unsigned int len)
+{
+ unsigned int *virt = ring->virtual_start + ring->tail;
+ BUG_ON((len&~(4-1)) != 0);
+ intel_ring_begin(dev, ring, len);
+ memcpy(virt, data, len);
+ ring->tail += len;
+ ring->tail &= ring->size - 1;
+ ring->space -= len;
+ intel_ring_advance(dev, ring);
+}
+
+u32 intel_ring_get_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ u32 seqno;
+ seqno = ring->next_seqno;
+
+ /* reserve 0 for non-seqno */
+ if (++ring->next_seqno == 0)
+ ring->next_seqno = 1;
+ return seqno;
+}
+
+struct intel_ring_buffer render_ring = {
+ .name = "render ring",
+ .regs = {
+ .ctl = PRB0_CTL,
+ .head = PRB0_HEAD,
+ .tail = PRB0_TAIL,
+ .start = PRB0_START
+ },
+ .ring_flag = I915_EXEC_RENDER,
+ .size = 32 * PAGE_SIZE,
+ .alignment = PAGE_SIZE,
+ .virtual_start = NULL,
+ .dev = NULL,
+ .gem_object = NULL,
+ .head = 0,
+ .tail = 0,
+ .space = 0,
+ .next_seqno = 1,
+ .user_irq_refcount = 0,
+ .irq_gem_seqno = 0,
+ .waiting_gem_seqno = 0,
+ .setup_status_page = render_setup_status_page,
+ .init = init_render_ring,
+ .get_head = render_ring_get_head,
+ .get_tail = render_ring_get_tail,
+ .get_active_head = render_ring_get_active_head,
+ .advance_ring = render_ring_advance_ring,
+ .flush = render_ring_flush,
+ .add_request = render_ring_add_request,
+ .get_gem_seqno = render_ring_get_gem_seqno,
+ .user_irq_get = render_ring_get_user_irq,
+ .user_irq_put = render_ring_put_user_irq,
+ .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+ .status_page = {NULL, 0, NULL},
+ .map = {0,}
+};
+
+/* ring buffer for bit-stream decoder */
+
+struct intel_ring_buffer bsd_ring = {
+ .name = "bsd ring",
+ .regs = {
+ .ctl = BSD_RING_CTL,
+ .head = BSD_RING_HEAD,
+ .tail = BSD_RING_TAIL,
+ .start = BSD_RING_START
+ },
+ .ring_flag = I915_EXEC_BSD,
+ .size = 32 * PAGE_SIZE,
+ .alignment = PAGE_SIZE,
+ .virtual_start = NULL,
+ .dev = NULL,
+ .gem_object = NULL,
+ .head = 0,
+ .tail = 0,
+ .space = 0,
+ .next_seqno = 1,
+ .user_irq_refcount = 0,
+ .irq_gem_seqno = 0,
+ .waiting_gem_seqno = 0,
+ .setup_status_page = bsd_setup_status_page,
+ .init = init_bsd_ring,
+ .get_head = bsd_ring_get_head,
+ .get_tail = bsd_ring_get_tail,
+ .get_active_head = bsd_ring_get_active_head,
+ .advance_ring = bsd_ring_advance_ring,
+ .flush = bsd_ring_flush,
+ .add_request = bsd_ring_add_request,
+ .get_gem_seqno = bsd_ring_get_gem_seqno,
+ .user_irq_get = bsd_ring_get_user_irq,
+ .user_irq_put = bsd_ring_put_user_irq,
+ .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
+ .status_page = {NULL, 0, NULL},
+ .map = {0,}
+};
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
new file mode 100644
index 000000000000..d5568d3766de
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -0,0 +1,124 @@
+#ifndef _INTEL_RINGBUFFER_H_
+#define _INTEL_RINGBUFFER_H_
+
+struct intel_hw_status_page {
+ void *page_addr;
+ unsigned int gfx_addr;
+ struct drm_gem_object *obj;
+};
+
+struct drm_i915_gem_execbuffer2;
+struct intel_ring_buffer {
+ const char *name;
+ struct ring_regs {
+ u32 ctl;
+ u32 head;
+ u32 tail;
+ u32 start;
+ } regs;
+ unsigned int ring_flag;
+ unsigned long size;
+ unsigned int alignment;
+ void *virtual_start;
+ struct drm_device *dev;
+ struct drm_gem_object *gem_object;
+
+ unsigned int head;
+ unsigned int tail;
+ unsigned int space;
+ u32 next_seqno;
+ struct intel_hw_status_page status_page;
+
+ u32 irq_gem_seqno; /* last seq seen at irq time */
+ u32 waiting_gem_seqno;
+ int user_irq_refcount;
+ void (*user_irq_get)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ void (*user_irq_put)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ void (*setup_status_page)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+
+ int (*init)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+
+ unsigned int (*get_head)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ unsigned int (*get_tail)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ unsigned int (*get_active_head)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ void (*advance_ring)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ void (*flush)(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains);
+ u32 (*add_request)(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_file *file_priv,
+ u32 flush_domains);
+ u32 (*get_gem_seqno)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ int (*dispatch_gem_execbuffer)(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset);
+
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head request_list;
+
+ wait_queue_head_t irq_queue;
+ drm_local_map_t map;
+};
+
+static inline u32
+intel_read_status_page(struct intel_ring_buffer *ring,
+ int reg)
+{
+ u32 *regs = ring->status_page.page_addr;
+ return regs[reg];
+}
+
+int intel_init_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+int intel_wait_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring, int n);
+int intel_wrap_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+void intel_ring_begin(struct drm_device *dev,
+ struct intel_ring_buffer *ring, int n);
+void intel_ring_emit(struct drm_device *dev,
+ struct intel_ring_buffer *ring, u32 data);
+void intel_fill_struct(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ void *data,
+ unsigned int len);
+void intel_ring_advance(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+
+u32 intel_ring_get_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+
+extern struct intel_ring_buffer render_ring;
+extern struct intel_ring_buffer bsd_ring;
+
+#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index aba72c489a2f..76993ac16cc1 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1479,7 +1479,7 @@ intel_find_analog_connector(struct drm_device *dev)
intel_encoder = enc_to_intel_encoder(encoder);
if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector && encoder == intel_attached_encoder(connector))
+ if (encoder == intel_attached_encoder(connector))
return connector;
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index e13f6af0037a..d4bcca8a5133 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -34,7 +34,7 @@
static struct nouveau_dsm_priv {
bool dsm_detected;
acpi_handle dhandle;
- acpi_handle dsm_handle;
+ acpi_handle rom_handle;
} nouveau_dsm_priv;
static const char nouveau_dsm_muid[] = {
@@ -107,9 +107,9 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
{
if (id == VGA_SWITCHEROO_IGD)
- return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA);
+ return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
else
- return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED);
+ return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED);
}
static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
@@ -118,7 +118,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
if (id == VGA_SWITCHEROO_IGD)
return 0;
- return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state);
+ return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
}
static int nouveau_dsm_init(void)
@@ -151,18 +151,18 @@ static bool nouveau_dsm_pci_probe(struct pci_dev *pdev)
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
+
status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
if (ACPI_FAILURE(status)) {
return false;
}
- ret= nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED,
- NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
+ ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED,
+ NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
if (ret < 0)
return false;
nouveau_dsm_priv.dhandle = dhandle;
- nouveau_dsm_priv.dsm_handle = nvidia_handle;
return true;
}
@@ -173,6 +173,7 @@ static bool nouveau_dsm_detect(void)
struct pci_dev *pdev = NULL;
int has_dsm = 0;
int vga_count = 0;
+
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
@@ -180,7 +181,7 @@ static bool nouveau_dsm_detect(void)
}
if (vga_count == 2 && has_dsm) {
- acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer);
+ acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
nouveau_dsm_priv.dsm_detected = true;
@@ -204,3 +205,57 @@ void nouveau_unregister_dsm_handler(void)
{
vga_switcheroo_unregister_handler();
}
+
+/* retrieve the ROM in 4k blocks */
+static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
+ int offset, int len)
+{
+ acpi_status status;
+ union acpi_object rom_arg_elements[2], *obj;
+ struct acpi_object_list rom_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+ rom_arg.count = 2;
+ rom_arg.pointer = &rom_arg_elements[0];
+
+ rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ rom_arg_elements[0].integer.value = offset;
+
+ rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ rom_arg_elements[1].integer.value = len;
+
+ status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status));
+ return -ENODEV;
+ }
+ obj = (union acpi_object *)buffer.pointer;
+ memcpy(bios+offset, obj->buffer.pointer, len);
+ kfree(buffer.pointer);
+ return len;
+}
+
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
+{
+ acpi_status status;
+ acpi_handle dhandle, rom_handle;
+
+ if (!nouveau_dsm_priv.dsm_detected)
+ return false;
+
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+
+ status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ nouveau_dsm_priv.rom_handle = rom_handle;
+ return true;
+}
+
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
+{
+ return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index e7e69ccce5c9..fc924b649195 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -178,6 +178,25 @@ out:
pci_disable_rom(dev->pdev);
}
+static void load_vbios_acpi(struct drm_device *dev, uint8_t *data)
+{
+ int i;
+ int ret;
+ int size = 64 * 1024;
+
+ if (!nouveau_acpi_rom_supported(dev->pdev))
+ return;
+
+ for (i = 0; i < (size / ROM_BIOS_PAGE); i++) {
+ ret = nouveau_acpi_get_bios_chunk(data,
+ (i * ROM_BIOS_PAGE),
+ ROM_BIOS_PAGE);
+ if (ret <= 0)
+ break;
+ }
+ return;
+}
+
struct methods {
const char desc[8];
void (*loadbios)(struct drm_device *, uint8_t *);
@@ -191,6 +210,7 @@ static struct methods nv04_methods[] = {
};
static struct methods nv50_methods[] = {
+ { "ACPI", load_vbios_acpi, true },
{ "PRAMIN", load_vbios_pramin, true },
{ "PROM", load_vbios_prom, false },
{ "PCIROM", load_vbios_pci, true },
@@ -814,7 +834,7 @@ init_i2c_device_find(struct drm_device *dev, int i2c_index)
if (i2c_index == 0x81)
i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
- if (i2c_index > DCB_MAX_NUM_I2C_ENTRIES) {
+ if (i2c_index >= DCB_MAX_NUM_I2C_ENTRIES) {
NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
return NULL;
}
@@ -2807,7 +2827,10 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
- nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default);
+ BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
+ offset, gpio->tag, gpio->state_default);
+ if (bios->execute)
+ nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default);
/* The NVIDIA binary driver doesn't appear to actually do
* any of this, my VBIOS does however.
@@ -3897,7 +3920,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
static uint8_t *
bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
- uint16_t record, int record_len, int record_nr)
+ uint16_t record, int record_len, int record_nr,
+ bool match_link)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvbios *bios = &dev_priv->vbios;
@@ -3905,12 +3929,28 @@ bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
uint16_t table;
int i, v;
+ switch (dcbent->type) {
+ case OUTPUT_TMDS:
+ case OUTPUT_LVDS:
+ case OUTPUT_DP:
+ break;
+ default:
+ match_link = false;
+ break;
+ }
+
for (i = 0; i < record_nr; i++, record += record_len) {
table = ROM16(bios->data[record]);
if (!table)
continue;
entry = ROM32(bios->data[table]);
+ if (match_link) {
+ v = (entry & 0x00c00000) >> 22;
+ if (!(v & dcbent->sorconf.link))
+ continue;
+ }
+
v = (entry & 0x000f0000) >> 16;
if (!(v & dcbent->or))
continue;
@@ -3952,7 +3992,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
*length = table[4];
return bios_output_config_match(dev, dcbent,
bios->display.dp_table_ptr + table[1],
- table[2], table[3]);
+ table[2], table[3], table[0] >= 0x21);
}
int
@@ -4041,7 +4081,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
dcbent->type, dcbent->location, dcbent->or);
otable = bios_output_config_match(dev, dcbent, table[1] +
bios->display.script_table_ptr,
- table[2], table[3]);
+ table[2], table[3], table[0] >= 0x21);
if (!otable) {
NV_ERROR(dev, "Couldn't find matching output script table\n");
return 1;
@@ -5533,12 +5573,6 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->bus = (conn >> 16) & 0xf;
entry->location = (conn >> 20) & 0x3;
entry->or = (conn >> 24) & 0xf;
- /*
- * Normal entries consist of a single bit, but dual link has the
- * next most significant bit set too
- */
- entry->duallink_possible =
- ((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
switch (entry->type) {
case OUTPUT_ANALOG:
@@ -5622,6 +5656,16 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
break;
}
+ if (dcb->version < 0x40) {
+ /* Normal entries consist of a single bit, but dual link has
+ * the next most significant bit set too
+ */
+ entry->duallink_possible =
+ ((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
+ } else {
+ entry->duallink_possible = (entry->sorconf.link == 3);
+ }
+
/* unsure what DCB version introduces this, 3.0? */
if (conf & 0x100000)
entry->i2c_upper_default = true;
@@ -6205,6 +6249,30 @@ nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
nouveau_i2c_fini(dev, entry);
}
+static bool
+nouveau_bios_posted(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ bool was_locked;
+ unsigned htotal;
+
+ if (dev_priv->chipset >= NV_50) {
+ if (NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
+ NVReadVgaCrtc(dev, 0, 0x1a) == 0)
+ return false;
+ return true;
+ }
+
+ was_locked = NVLockVgaCrtcs(dev, false);
+ htotal = NVReadVgaCrtc(dev, 0, 0x06);
+ htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8;
+ htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4;
+ htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10;
+ htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11;
+ NVLockVgaCrtcs(dev, was_locked);
+ return (htotal != 0);
+}
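
For pre-NV50 chips the POST check above reassembles the horizontal total from CRTC registers: the low 8 bits come from CR06 and single overflow bits from CR07, CR25 and CR41; a nonzero result is taken as evidence the VBIOS has already run. A standalone sketch of that bit reassembly, with the register values invented for illustration:

#include <stdio.h>

int main(void)
{
	/* Invented CRTC register reads, same bit layout as nouveau_bios_posted(). */
	unsigned char cr06 = 0x5f, cr07 = 0x21, cr25 = 0x00, cr41 = 0x00;
	unsigned int htotal;

	htotal  = cr06;			/* bits 7:0 */
	htotal |= (cr07 & 0x01) << 8;	/* bit 8 */
	htotal |= (cr07 & 0x20) << 4;	/* bit 9 (stored in CR07 bit 5) */
	htotal |= (cr25 & 0x01) << 10;	/* bit 10 */
	htotal |= (cr41 & 0x01) << 11;	/* bit 11 */

	printf("htotal = %u%s\n", htotal,
	       htotal ? " (nonzero: treat card as POSTed)" : "");
	return 0;
}
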
+
int
nouveau_bios_init(struct drm_device *dev)
{
@@ -6239,11 +6307,9 @@ nouveau_bios_init(struct drm_device *dev)
bios->execute = false;
/* ... unless card isn't POSTed already */
- if (dev_priv->card_type >= NV_10 &&
- NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
- NVReadVgaCrtc(dev, 0, 0x1a) == 0) {
+ if (!nouveau_bios_posted(dev)) {
NV_INFO(dev, "Adaptor not initialised\n");
- if (dev_priv->card_type < NV_50) {
+ if (dev_priv->card_type < NV_40) {
NV_ERROR(dev, "Unable to POST this chipset\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7e663a79829f..149ed224c3cb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -241,7 +241,8 @@ nouveau_connector_detect(struct drm_connector *connector)
if (nv_encoder && nv_connector->native_mode) {
unsigned status = connector_status_connected;
-#ifdef CONFIG_ACPI
+#if defined(CONFIG_ACPI_BUTTON) || \
+ (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
if (!nouveau_ignorelid && !acpi_lid_open())
status = connector_status_unknown;
#endif
@@ -431,24 +432,27 @@ nouveau_connector_set_property(struct drm_connector *connector,
}
static struct drm_display_mode *
-nouveau_connector_native_mode(struct nouveau_connector *connector)
+nouveau_connector_native_mode(struct drm_connector *connector)
{
- struct drm_device *dev = connector->base.dev;
+ struct drm_connector_helper_funcs *helper = connector->helper_private;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *largest = NULL;
int high_w = 0, high_h = 0, high_v = 0;
- /* Use preferred mode if there is one.. */
- list_for_each_entry(mode, &connector->base.probed_modes, head) {
+ list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
+ if (helper->mode_valid(connector, mode) != MODE_OK)
+ continue;
+
+ /* Use preferred mode if there is one.. */
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
NV_DEBUG_KMS(dev, "native mode from preferred\n");
return drm_mode_duplicate(dev, mode);
}
- }
- /* Otherwise, take the resolution with the largest width, then height,
- * then vertical refresh
- */
- list_for_each_entry(mode, &connector->base.probed_modes, head) {
+ /* Otherwise, take the resolution with the largest width, then
+ * height, then vertical refresh
+ */
if (mode->hdisplay < high_w)
continue;
@@ -552,7 +556,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
*/
if (!nv_connector->native_mode)
nv_connector->native_mode =
- nouveau_connector_native_mode(nv_connector);
+ nouveau_connector_native_mode(connector);
if (ret == 0 && nv_connector->native_mode) {
struct drm_display_mode *mode;
@@ -583,9 +587,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
switch (nv_encoder->dcb->type) {
case OUTPUT_LVDS:
- BUG_ON(!nv_connector->native_mode);
- if (mode->hdisplay > nv_connector->native_mode->hdisplay ||
- mode->vdisplay > nv_connector->native_mode->vdisplay)
+ if (nv_connector->native_mode &&
+ (mode->hdisplay > nv_connector->native_mode->hdisplay ||
+ mode->vdisplay > nv_connector->native_mode->vdisplay))
return MODE_PANEL;
min_clock = 0;
@@ -593,8 +597,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
break;
case OUTPUT_TMDS:
if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) ||
- (dev_priv->card_type < NV_50 &&
- !nv_encoder->dcb->duallink_possible))
+ !nv_encoder->dcb->duallink_possible)
max_clock = 165000;
else
max_clock = 330000;
@@ -728,7 +731,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
if (ret == 0)
goto out;
nv_connector->detected_encoder = nv_encoder;
- nv_connector->native_mode = nouveau_connector_native_mode(nv_connector);
+ nv_connector->native_mode = nouveau_connector_native_mode(connector);
list_for_each_entry_safe(mode, temp, &connector->probed_modes, head)
drm_mode_remove(connector, mode);
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index 49fa7b2d257e..cb1ce2a09162 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -40,6 +40,8 @@ struct nouveau_crtc {
int sharpness;
int last_dpms;
+ int cursor_saved_x, cursor_saved_y;
+
struct {
int cpp;
bool blanked;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index c6079e36669d..273770432298 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -175,6 +175,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
nouveau_bo_unpin(nouveau_fb->nvbo);
}
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ }
+
NV_INFO(dev, "Evicting buffers...\n");
ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
@@ -314,12 +321,34 @@ nouveau_pci_resume(struct pci_dev *pdev)
nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
}
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int ret;
+
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+ if (ret)
+ NV_ERROR(dev, "Could not pin/map cursor.\n");
+ }
+
if (dev_priv->card_type < NV_50) {
nv04_display_restore(dev);
NVLockVgaCrtcs(dev, false);
} else
nv50_display_init(dev);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nv_crtc->cursor.set_offset(nv_crtc,
+ nv_crtc->cursor.nvbo->bo.offset -
+ dev_priv->vm_vram_base);
+
+ nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
+ nv_crtc->cursor_saved_y);
+ }
+
/* Force CLUT to get re-loaded during modeset */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 5b134438effe..c69719106489 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -851,12 +851,17 @@ extern int nouveau_dma_init(struct nouveau_channel *);
extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
/* nouveau_acpi.c */
+#define ROM_BIOS_PAGE 4096
#if defined(CONFIG_ACPI)
void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
#else
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
+static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
+static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
#endif
/* nouveau_backlight.c */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index fd4a2df715e9..c9a4a0d2a115 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -377,6 +377,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fbdev *nfbdev;
+ int ret;
nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
if (!nfbdev)
@@ -386,7 +387,12 @@ int nouveau_fbcon_init(struct drm_device *dev)
dev_priv->nfbdev = nfbdev;
nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
- drm_fb_helper_init(dev, &nfbdev->helper, 2, 4);
+ ret = drm_fb_helper_init(dev, &nfbdev->helper, 2, 4);
+ if (ret) {
+ kfree(nfbdev);
+ return ret;
+ }
+
drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
drm_fb_helper_initial_config(&nfbdev->helper, 32);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 775a7017af64..c1fd42b0dad1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -540,7 +540,8 @@ nouveau_mem_detect(struct drm_device *dev)
dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA);
dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK;
if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
- dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
+ dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
+ dev_priv->vram_sys_base <<= 12;
}
NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index e632339c323e..b02a231d6937 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -376,12 +376,15 @@ out_err:
static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
+ struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
nouveau_pci_resume(pdev);
+ drm_kms_helper_poll_enable(dev);
} else {
printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
+ drm_kms_helper_poll_disable(dev);
nouveau_pci_suspend(pdev, pmm);
}
}
@@ -776,29 +779,24 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
return ret;
}
- /* map larger RAMIN aperture on NV40 cards */
- dev_priv->ramin = NULL;
+ /* Map PRAMIN BAR, or on older cards, the aperture within BAR0 */
if (dev_priv->card_type >= NV_40) {
int ramin_bar = 2;
if (pci_resource_len(dev->pdev, ramin_bar) == 0)
ramin_bar = 3;
dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
- dev_priv->ramin = ioremap(
- pci_resource_start(dev->pdev, ramin_bar),
+ dev_priv->ramin =
+ ioremap(pci_resource_start(dev->pdev, ramin_bar),
dev_priv->ramin_size);
if (!dev_priv->ramin) {
- NV_ERROR(dev, "Failed to init RAMIN mapping, "
- "limited instance memory available\n");
+ NV_ERROR(dev, "Failed to PRAMIN BAR");
+ return -ENOMEM;
}
- }
-
- /* On older cards (or if the above failed), create a map covering
- * the BAR0 PRAMIN aperture */
- if (!dev_priv->ramin) {
+ } else {
dev_priv->ramin_size = 1 * 1024 * 1024;
dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN,
- dev_priv->ramin_size);
+ dev_priv->ramin_size);
if (!dev_priv->ramin) {
NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
return -ENOMEM;
@@ -913,6 +911,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
case NOUVEAU_GETPARAM_VM_VRAM_BASE:
getparam->value = dev_priv->vm_vram_base;
break;
+ case NOUVEAU_GETPARAM_PTIMER_TIME:
+ getparam->value = dev_priv->engine.timer.read(dev);
+ break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register
* address is the same. User is supposed to know the card
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c
index 89a91b9d8b25..aaf3de3bc816 100644
--- a/drivers/gpu/drm/nouveau/nv04_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv04_cursor.c
@@ -20,6 +20,7 @@ nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
static void
nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
{
+ nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index,
NV_PRAMDAC_CU_START_POS,
XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) |
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 0616c96e4b67..704a25d04ac9 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -253,7 +253,11 @@ nv40_graph_init(struct drm_device *dev)
if (!dev_priv->engine.graph.ctxprog) {
struct nouveau_grctx ctx = {};
- uint32_t cp[256];
+ uint32_t *cp;
+
+ cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
ctx.dev = dev;
ctx.mode = NOUVEAU_GRCTX_PROG;
@@ -265,6 +269,8 @@ nv40_graph_init(struct drm_device *dev)
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
for (i = 0; i < ctx.ctxprog_len; i++)
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
+
+ kfree(cp);
}
/* No context present currently */
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index 753e723adb3a..03ad7ab14f09 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -107,6 +107,7 @@ nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
{
struct drm_device *dev = nv_crtc->base.dev;
+ nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
((y & 0xFFFF) << 16) | (x & 0xFFFF));
/* Needed to make the cursor move. */
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index a95e6941ba88..32611bd30e6d 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -6,10 +6,16 @@
int
nv50_fb_init(struct drm_device *dev)
{
- /* This is needed to get meaningful information from 100c90
- * on traps. No idea what these values mean exactly. */
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ /* Not a clue what this is exactly. Without pointing it at a
+ * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
+ * cause IOMMU "read from address 0" errors (rh#561267)
+ */
+ nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8);
+
+ /* This is needed to get meaningful information from 100c90
+ * on traps. No idea what these values mean exactly. */
switch (dev_priv->chipset) {
case 0x50:
nv_wr32(dev, 0x100c90, 0x0707ff);
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index c61782b314e7..bb47ad737267 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -31,7 +31,7 @@ nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
{
const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
- if (gpio->line > 32)
+ if (gpio->line >= 32)
return -EINVAL;
*reg = nv50_gpio_reg[gpio->line >> 3];
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index b11eaf9c5c7c..812778db76ac 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -274,7 +274,6 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
int
nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_encoder *nv_encoder = NULL;
struct drm_encoder *encoder;
bool dum;
@@ -324,11 +323,7 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1);
uint32_t tmp;
- if (dev_priv->chipset < 0x90 ||
- dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
- tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
- else
- tmp = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
+ tmp = nv_rd32(dev, 0x61c700 + (or * 0x800));
switch ((tmp & 0x00000f00) >> 8) {
case 8:
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 3c91312dea9a..84b1f2729d43 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -33,6 +33,9 @@ $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
$(call if_changed,mkregtable)
+$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
$(obj)/r200.o: $(obj)/r200_reg_safe.h
@@ -47,6 +50,8 @@ $(obj)/rs600.o: $(obj)/rs600_reg_safe.h
$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
+$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h
+
radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
radeon_irq.o r300_cmdbuf.o r600_cp.o
# add KMS driver
@@ -60,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
- evergreen.o
+ evergreen.o evergreen_cs.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 03dd6c41dc19..f3f2827017ef 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -707,6 +707,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
break;
case ATOM_DCPLL:
case ATOM_PPLL_INVALID:
+ default:
pll = &rdev->clock.dcpll;
break;
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 8c8e4d3cbaa3..4b6623df3b96 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -41,7 +41,18 @@ void evergreen_fini(struct radeon_device *rdev);
void evergreen_pm_misc(struct radeon_device *rdev)
{
-
+ int req_ps_idx = rdev->pm.requested_power_state_index;
+ int req_cm_idx = rdev->pm.requested_clock_mode_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+ if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+ if (voltage->voltage != rdev->pm.current_vddc) {
+ radeon_atom_set_voltage(rdev, voltage->voltage);
+ rdev->pm.current_vddc = voltage->voltage;
+ DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+ }
+ }
}
void evergreen_pm_prepare(struct radeon_device *rdev)
@@ -2148,7 +2159,7 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->accel_working = false;
+ rdev->accel_working = true;
r = evergreen_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
new file mode 100644
index 000000000000..64516b950891
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -0,0 +1,1356 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon.h"
+#include "evergreend.h"
+#include "evergreen_reg_safe.h"
+
+static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc);
+
+struct evergreen_cs_track {
+ u32 group_size;
+ u32 nbanks;
+ u32 npipes;
+ /* value we track */
+ u32 nsamples;
+ u32 cb_color_base_last[12];
+ struct radeon_bo *cb_color_bo[12];
+ u32 cb_color_bo_offset[12];
+ struct radeon_bo *cb_color_fmask_bo[8];
+ struct radeon_bo *cb_color_cmask_bo[8];
+ u32 cb_color_info[12];
+ u32 cb_color_view[12];
+ u32 cb_color_pitch_idx[12];
+ u32 cb_color_slice_idx[12];
+ u32 cb_color_dim_idx[12];
+ u32 cb_color_dim[12];
+ u32 cb_color_pitch[12];
+ u32 cb_color_slice[12];
+ u32 cb_color_cmask_slice[8];
+ u32 cb_color_fmask_slice[8];
+ u32 cb_target_mask;
+ u32 cb_shader_mask;
+ u32 vgt_strmout_config;
+ u32 vgt_strmout_buffer_config;
+ u32 db_depth_control;
+ u32 db_depth_view;
+ u32 db_depth_size;
+ u32 db_depth_size_idx;
+ u32 db_z_info;
+ u32 db_z_idx;
+ u32 db_z_read_offset;
+ u32 db_z_write_offset;
+ struct radeon_bo *db_z_read_bo;
+ struct radeon_bo *db_z_write_bo;
+ u32 db_s_info;
+ u32 db_s_idx;
+ u32 db_s_read_offset;
+ u32 db_s_write_offset;
+ struct radeon_bo *db_s_read_bo;
+ struct radeon_bo *db_s_write_bo;
+};
+
+static void evergreen_cs_track_init(struct evergreen_cs_track *track)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ track->cb_color_fmask_bo[i] = NULL;
+ track->cb_color_cmask_bo[i] = NULL;
+ track->cb_color_cmask_slice[i] = 0;
+ track->cb_color_fmask_slice[i] = 0;
+ }
+
+ for (i = 0; i < 12; i++) {
+ track->cb_color_base_last[i] = 0;
+ track->cb_color_bo[i] = NULL;
+ track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+ track->cb_color_info[i] = 0;
+ track->cb_color_view[i] = 0;
+ track->cb_color_pitch_idx[i] = 0;
+ track->cb_color_slice_idx[i] = 0;
+ track->cb_color_dim[i] = 0;
+ track->cb_color_pitch[i] = 0;
+ track->cb_color_slice[i] = 0;
+ track->cb_color_dim[i] = 0;
+ }
+ track->cb_target_mask = 0xFFFFFFFF;
+ track->cb_shader_mask = 0xFFFFFFFF;
+
+ track->db_depth_view = 0xFFFFC000;
+ track->db_depth_size = 0xFFFFFFFF;
+ track->db_depth_size_idx = 0;
+ track->db_depth_control = 0xFFFFFFFF;
+ track->db_z_info = 0xFFFFFFFF;
+ track->db_z_idx = 0xFFFFFFFF;
+ track->db_z_read_offset = 0xFFFFFFFF;
+ track->db_z_write_offset = 0xFFFFFFFF;
+ track->db_z_read_bo = NULL;
+ track->db_z_write_bo = NULL;
+ track->db_s_info = 0xFFFFFFFF;
+ track->db_s_idx = 0xFFFFFFFF;
+ track->db_s_read_offset = 0xFFFFFFFF;
+ track->db_s_write_offset = 0xFFFFFFFF;
+ track->db_s_read_bo = NULL;
+ track->db_s_write_bo = NULL;
+}
+
+static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+{
+ /* XXX fill in */
+ return 0;
+}
+
+static int evergreen_cs_track_check(struct radeon_cs_parser *p)
+{
+ struct evergreen_cs_track *track = p->track;
+
+ /* we don't support stream out buffer yet */
+ if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) {
+ dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
+ return -EINVAL;
+ }
+
+ /* XXX fill in */
+ return 0;
+}
+
+/**
+ * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @parser: parser structure holding parsing context.
+ * @pkt: where to store packet information
+ *
+ * Assume that chunk_ib_index is properly set. Will return -EINVAL
+ * if the packet is bigger than the remaining IB size, or if the packet type is unknown.
+ **/
+int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ uint32_t header;
+
+ if (idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ header = radeon_get_ib_value(p, idx);
+ pkt->idx = idx;
+ pkt->type = CP_PACKET_GET_TYPE(header);
+ pkt->count = CP_PACKET_GET_COUNT(header);
+ pkt->one_reg_wr = 0;
+ switch (pkt->type) {
+ case PACKET_TYPE0:
+ pkt->reg = CP_PACKET0_GET_REG(header);
+ break;
+ case PACKET_TYPE3:
+ pkt->opcode = CP_PACKET3_GET_OPCODE(header);
+ break;
+ case PACKET_TYPE2:
+ pkt->count = -1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+ return -EINVAL;
+ }
+ if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+ DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+ pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ return 0;
+}
+
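As a side note on the header layout parsed here: a PM4 header packs the packet type in bits 31:30, the dword count in bits 29:16 and, for type-3 packets, the opcode in bits 15:8. A hypothetical header value (illustrative only, not taken from the patch) decodes with the CP_PACKET_* helpers added to evergreend.h further down in this series:

    uint32_t header = 0xC0012D00;                     /* example value, not from the patch */
    unsigned type   = CP_PACKET_GET_TYPE(header);     /* (h >> 30) & 3      -> 3, PACKET_TYPE3 */
    unsigned count  = CP_PACKET_GET_COUNT(header);    /* (h >> 16) & 0x3FFF -> 1 */
    unsigned opcode = CP_PACKET3_GET_OPCODE(header);  /* (h >> 8) & 0xFF    -> 0x2D, DRAW_INDEX_AUTO */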
+/**
+ * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
+ * @parser: parser structure holding parsing context.
+ * @cs_reloc: where to store the relocation information
+ *
+ * Check that the next packet is a relocation packet3 (NOP) and return
+ * the corresponding entry from the relocation chunk.
+ **/
+static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ struct radeon_cs_packet p3reloc;
+ unsigned idx;
+ int r;
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ *cs_reloc = NULL;
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return r;
+ }
+ p->idx += p3reloc.count + 2;
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+ p3reloc.idx);
+ return -EINVAL;
+ }
+ idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ return -EINVAL;
+ }
+ /* FIXME: we assume reloc size is 4 dwords */
+ *cs_reloc = p->relocs_ptr[(idx / 4)];
+ return 0;
+}
+
+/**
+ * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
+ * @parser: parser structure holding parsing context.
+ *
+ * Return 1 if the next packet in the IB is a relocation packet3 (NOP),
+ * 0 otherwise.
+ **/
+static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet p3reloc;
+ int r;
+
+ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return 0;
+ }
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * evergreen_cs_packet_next_vline() - parse userspace VLINE packet
+ * @parser: parser structure holding parsing context.
+ *
+ * Userspace sends a special sequence for VLINE waits.
+ * PACKET0 - VLINE_START_END + value
+ * PACKET3 - WAIT_REG_MEM poll vline status reg
+ * RELOC (P3) - crtc_id in reloc.
+ *
+ * This function parses this sequence and relocates the VLINE_START_END
+ * and WAIT_REG_MEM packets to the correct crtc.
+ * It also detects a switched-off crtc and NOPs out the
+ * wait in that case.
+ */
+static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ struct radeon_cs_packet p3reloc, wait_reg_mem;
+ int crtc_id;
+ int r;
+ uint32_t header, h_idx, reg, wait_reg_mem_info;
+ volatile uint32_t *ib;
+
+ ib = p->ib->ptr;
+
+ /* parse the WAIT_REG_MEM */
+ r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
+ if (r)
+ return r;
+
+ /* check it's a WAIT_REG_MEM */
+ if (wait_reg_mem.type != PACKET_TYPE3 ||
+ wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
+ DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
+ r = -EINVAL;
+ return r;
+ }
+
+ wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
+ /* bit 4 is reg (0) or mem (1) */
+ if (wait_reg_mem_info & 0x10) {
+ DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
+ r = -EINVAL;
+ return r;
+ }
+ /* waiting for value to be equal */
+ if ((wait_reg_mem_info & 0x7) != 0x3) {
+ DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
+ r = -EINVAL;
+ return r;
+ }
+ if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
+ DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
+ r = -EINVAL;
+ return r;
+ }
+
+ if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
+ DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
+ r = -EINVAL;
+ return r;
+ }
+
+ /* jump over the NOP */
+ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
+ if (r)
+ return r;
+
+ h_idx = p->idx - 2;
+ p->idx += wait_reg_mem.count + 2;
+ p->idx += p3reloc.count + 2;
+
+ header = radeon_get_ib_value(p, h_idx);
+ crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
+ reg = CP_PACKET0_GET_REG(header);
+ mutex_lock(&p->rdev->ddev->mode_config.mutex);
+ obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_ERROR("cannot find crtc %d\n", crtc_id);
+ r = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+ radeon_crtc = to_radeon_crtc(crtc);
+ crtc_id = radeon_crtc->crtc_id;
+
+ if (!crtc->enabled) {
+ /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
+ ib[h_idx + 2] = PACKET2(0);
+ ib[h_idx + 3] = PACKET2(0);
+ ib[h_idx + 4] = PACKET2(0);
+ ib[h_idx + 5] = PACKET2(0);
+ ib[h_idx + 6] = PACKET2(0);
+ ib[h_idx + 7] = PACKET2(0);
+ ib[h_idx + 8] = PACKET2(0);
+ } else {
+ switch (reg) {
+ case EVERGREEN_VLINE_START_END:
+ header &= ~R600_CP_PACKET0_REG_MASK;
+ header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
+ ib[h_idx] = header;
+ ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
+ break;
+ default:
+ DRM_ERROR("unknown crtc reloc\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&p->rdev->ddev->mode_config.mutex);
+ return r;
+}
+
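Read together with the offsets used above, the IB sequence this function expects looks roughly as follows (a sketch reconstructed from the code, not text taken from the patch):

    /* h_idx + 0     PACKET0(EVERGREEN_VLINE_START_END, 0) header
     * h_idx + 1     vline start/end value
     * h_idx + 2     PACKET3(WAIT_REG_MEM, 5) header
     * h_idx + 3     wait info (reg-vs-mem flag, compare function)
     * h_idx + 4     poll register, EVERGREEN_VLINE_STATUS >> 2
     * h_idx + 5,6,8 remaining WAIT_REG_MEM operands
     * h_idx + 7     mask, EVERGREEN_VLINE_STAT
     * h_idx + 9     PACKET3(NOP, 0) relocation header
     * h_idx + 10    relocation index, carrying the crtc_id
     */

When the target CRTC is disabled, the seven dwords at h_idx + 2 .. h_idx + 8 are overwritten with PACKET2(0) no-ops, which matches the seven PACKET2(0) writes above.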
+static int evergreen_packet0_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx, unsigned reg)
+{
+ int r;
+
+ switch (reg) {
+ case EVERGREEN_VLINE_START_END:
+ r = evergreen_cs_packet_parse_vline(p);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ return r;
+ }
+ break;
+ default:
+ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
+ reg, idx);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ unsigned reg, i;
+ unsigned idx;
+ int r;
+
+ idx = pkt->idx + 1;
+ reg = pkt->reg;
+ for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
+ r = evergreen_packet0_check(p, pkt, idx, reg);
+ if (r) {
+ return r;
+ }
+ }
+ return 0;
+}
+
+/**
+ * evergreen_cs_check_reg() - check if register is authorized or not
+ * @parser: parser structure holding parsing context
+ * @reg: register we are testing
+ * @idx: index into the cs buffer
+ *
+ * This function will test against evergreen_reg_safe_bm and return 0
+ * if the register is safe. If the register is not flagged as safe, this
+ * function will test it against a list of registers needing special handling.
+ */
+static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+ struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
+ struct radeon_cs_reloc *reloc;
+ u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+ u32 m, i, tmp, *ib;
+ int r;
+
+ i = (reg >> 7);
+ if (i > last_reg) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ m = 1 << ((reg >> 2) & 31);
+ if (!(evergreen_reg_safe_bm[i] & m))
+ return 0;
+ ib = p->ib->ptr;
+ switch (reg) {
+ /* force the following regs to 0 in an attempt to disable the out buffer,
+ * which we will need to understand better in order to perform a proper
+ * security check on it (Jerome)
+ */
+ case SQ_ESGS_RING_SIZE:
+ case SQ_GSVS_RING_SIZE:
+ case SQ_ESTMP_RING_SIZE:
+ case SQ_GSTMP_RING_SIZE:
+ case SQ_HSTMP_RING_SIZE:
+ case SQ_LSTMP_RING_SIZE:
+ case SQ_PSTMP_RING_SIZE:
+ case SQ_VSTMP_RING_SIZE:
+ case SQ_ESGS_RING_ITEMSIZE:
+ case SQ_ESTMP_RING_ITEMSIZE:
+ case SQ_GSTMP_RING_ITEMSIZE:
+ case SQ_GSVS_RING_ITEMSIZE:
+ case SQ_GS_VERT_ITEMSIZE:
+ case SQ_GS_VERT_ITEMSIZE_1:
+ case SQ_GS_VERT_ITEMSIZE_2:
+ case SQ_GS_VERT_ITEMSIZE_3:
+ case SQ_GSVS_RING_OFFSET_1:
+ case SQ_GSVS_RING_OFFSET_2:
+ case SQ_GSVS_RING_OFFSET_3:
+ case SQ_HSTMP_RING_ITEMSIZE:
+ case SQ_LSTMP_RING_ITEMSIZE:
+ case SQ_PSTMP_RING_ITEMSIZE:
+ case SQ_VSTMP_RING_ITEMSIZE:
+ case VGT_TF_RING_SIZE:
+ /* get value to populate the IB don't remove */
+ tmp = radeon_get_ib_value(p, idx);
+ ib[idx] = 0;
+ break;
+ case DB_DEPTH_CONTROL:
+ track->db_depth_control = radeon_get_ib_value(p, idx);
+ break;
+ case DB_Z_INFO:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_z_info = radeon_get_ib_value(p, idx);
+ ib[idx] &= ~Z_ARRAY_MODE(0xf);
+ track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ } else {
+ ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ }
+ break;
+ case DB_STENCIL_INFO:
+ track->db_s_info = radeon_get_ib_value(p, idx);
+ break;
+ case DB_DEPTH_VIEW:
+ track->db_depth_view = radeon_get_ib_value(p, idx);
+ break;
+ case DB_DEPTH_SIZE:
+ track->db_depth_size = radeon_get_ib_value(p, idx);
+ track->db_depth_size_idx = idx;
+ break;
+ case DB_Z_READ_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_z_read_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_z_read_bo = reloc->robj;
+ break;
+ case DB_Z_WRITE_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_z_write_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_z_write_bo = reloc->robj;
+ break;
+ case DB_STENCIL_READ_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_s_read_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_s_read_bo = reloc->robj;
+ break;
+ case DB_STENCIL_WRITE_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_s_write_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_s_write_bo = reloc->robj;
+ break;
+ case VGT_STRMOUT_CONFIG:
+ track->vgt_strmout_config = radeon_get_ib_value(p, idx);
+ break;
+ case VGT_STRMOUT_BUFFER_CONFIG:
+ track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
+ break;
+ case CB_TARGET_MASK:
+ track->cb_target_mask = radeon_get_ib_value(p, idx);
+ break;
+ case CB_SHADER_MASK:
+ track->cb_shader_mask = radeon_get_ib_value(p, idx);
+ break;
+ case PA_SC_AA_CONFIG:
+ tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
+ track->nsamples = 1 << tmp;
+ break;
+ case CB_COLOR0_VIEW:
+ case CB_COLOR1_VIEW:
+ case CB_COLOR2_VIEW:
+ case CB_COLOR3_VIEW:
+ case CB_COLOR4_VIEW:
+ case CB_COLOR5_VIEW:
+ case CB_COLOR6_VIEW:
+ case CB_COLOR7_VIEW:
+ tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
+ track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+ break;
+ case CB_COLOR8_VIEW:
+ case CB_COLOR9_VIEW:
+ case CB_COLOR10_VIEW:
+ case CB_COLOR11_VIEW:
+ tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
+ track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+ break;
+ case CB_COLOR0_INFO:
+ case CB_COLOR1_INFO:
+ case CB_COLOR2_INFO:
+ case CB_COLOR3_INFO:
+ case CB_COLOR4_INFO:
+ case CB_COLOR5_INFO:
+ case CB_COLOR6_INFO:
+ case CB_COLOR7_INFO:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = (reg - CB_COLOR0_INFO) / 0x3c;
+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ }
+ break;
+ case CB_COLOR8_INFO:
+ case CB_COLOR9_INFO:
+ case CB_COLOR10_INFO:
+ case CB_COLOR11_INFO:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ }
+ break;
+ case CB_COLOR0_PITCH:
+ case CB_COLOR1_PITCH:
+ case CB_COLOR2_PITCH:
+ case CB_COLOR3_PITCH:
+ case CB_COLOR4_PITCH:
+ case CB_COLOR5_PITCH:
+ case CB_COLOR6_PITCH:
+ case CB_COLOR7_PITCH:
+ tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
+ track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_pitch_idx[tmp] = idx;
+ break;
+ case CB_COLOR8_PITCH:
+ case CB_COLOR9_PITCH:
+ case CB_COLOR10_PITCH:
+ case CB_COLOR11_PITCH:
+ tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
+ track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_pitch_idx[tmp] = idx;
+ break;
+ case CB_COLOR0_SLICE:
+ case CB_COLOR1_SLICE:
+ case CB_COLOR2_SLICE:
+ case CB_COLOR3_SLICE:
+ case CB_COLOR4_SLICE:
+ case CB_COLOR5_SLICE:
+ case CB_COLOR6_SLICE:
+ case CB_COLOR7_SLICE:
+ tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
+ track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_slice_idx[tmp] = idx;
+ break;
+ case CB_COLOR8_SLICE:
+ case CB_COLOR9_SLICE:
+ case CB_COLOR10_SLICE:
+ case CB_COLOR11_SLICE:
+ tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
+ track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_slice_idx[tmp] = idx;
+ break;
+ case CB_COLOR0_ATTRIB:
+ case CB_COLOR1_ATTRIB:
+ case CB_COLOR2_ATTRIB:
+ case CB_COLOR3_ATTRIB:
+ case CB_COLOR4_ATTRIB:
+ case CB_COLOR5_ATTRIB:
+ case CB_COLOR6_ATTRIB:
+ case CB_COLOR7_ATTRIB:
+ case CB_COLOR8_ATTRIB:
+ case CB_COLOR9_ATTRIB:
+ case CB_COLOR10_ATTRIB:
+ case CB_COLOR11_ATTRIB:
+ break;
+ case CB_COLOR0_DIM:
+ case CB_COLOR1_DIM:
+ case CB_COLOR2_DIM:
+ case CB_COLOR3_DIM:
+ case CB_COLOR4_DIM:
+ case CB_COLOR5_DIM:
+ case CB_COLOR6_DIM:
+ case CB_COLOR7_DIM:
+ tmp = (reg - CB_COLOR0_DIM) / 0x3c;
+ track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_dim_idx[tmp] = idx;
+ break;
+ case CB_COLOR8_DIM:
+ case CB_COLOR9_DIM:
+ case CB_COLOR10_DIM:
+ case CB_COLOR11_DIM:
+ tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
+ track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_dim_idx[tmp] = idx;
+ break;
+ case CB_COLOR0_FMASK:
+ case CB_COLOR1_FMASK:
+ case CB_COLOR2_FMASK:
+ case CB_COLOR3_FMASK:
+ case CB_COLOR4_FMASK:
+ case CB_COLOR5_FMASK:
+ case CB_COLOR6_FMASK:
+ case CB_COLOR7_FMASK:
+ tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_fmask_bo[tmp] = reloc->robj;
+ break;
+ case CB_COLOR0_CMASK:
+ case CB_COLOR1_CMASK:
+ case CB_COLOR2_CMASK:
+ case CB_COLOR3_CMASK:
+ case CB_COLOR4_CMASK:
+ case CB_COLOR5_CMASK:
+ case CB_COLOR6_CMASK:
+ case CB_COLOR7_CMASK:
+ tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_cmask_bo[tmp] = reloc->robj;
+ break;
+ case CB_COLOR0_FMASK_SLICE:
+ case CB_COLOR1_FMASK_SLICE:
+ case CB_COLOR2_FMASK_SLICE:
+ case CB_COLOR3_FMASK_SLICE:
+ case CB_COLOR4_FMASK_SLICE:
+ case CB_COLOR5_FMASK_SLICE:
+ case CB_COLOR6_FMASK_SLICE:
+ case CB_COLOR7_FMASK_SLICE:
+ tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
+ track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
+ break;
+ case CB_COLOR0_CMASK_SLICE:
+ case CB_COLOR1_CMASK_SLICE:
+ case CB_COLOR2_CMASK_SLICE:
+ case CB_COLOR3_CMASK_SLICE:
+ case CB_COLOR4_CMASK_SLICE:
+ case CB_COLOR5_CMASK_SLICE:
+ case CB_COLOR6_CMASK_SLICE:
+ case CB_COLOR7_CMASK_SLICE:
+ tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
+ track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
+ break;
+ case CB_COLOR0_BASE:
+ case CB_COLOR1_BASE:
+ case CB_COLOR2_BASE:
+ case CB_COLOR3_BASE:
+ case CB_COLOR4_BASE:
+ case CB_COLOR5_BASE:
+ case CB_COLOR6_BASE:
+ case CB_COLOR7_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = (reg - CB_COLOR0_BASE) / 0x3c;
+ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_base_last[tmp] = ib[idx];
+ track->cb_color_bo[tmp] = reloc->robj;
+ break;
+ case CB_COLOR8_BASE:
+ case CB_COLOR9_BASE:
+ case CB_COLOR10_BASE:
+ case CB_COLOR11_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
+ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_base_last[tmp] = ib[idx];
+ track->cb_color_bo[tmp] = reloc->robj;
+ break;
+ case CB_IMMED0_BASE:
+ case CB_IMMED1_BASE:
+ case CB_IMMED2_BASE:
+ case CB_IMMED3_BASE:
+ case CB_IMMED4_BASE:
+ case CB_IMMED5_BASE:
+ case CB_IMMED6_BASE:
+ case CB_IMMED7_BASE:
+ case CB_IMMED8_BASE:
+ case CB_IMMED9_BASE:
+ case CB_IMMED10_BASE:
+ case CB_IMMED11_BASE:
+ case DB_HTILE_DATA_BASE:
+ case SQ_PGM_START_FS:
+ case SQ_PGM_START_ES:
+ case SQ_PGM_START_VS:
+ case SQ_PGM_START_GS:
+ case SQ_PGM_START_PS:
+ case SQ_PGM_START_HS:
+ case SQ_PGM_START_LS:
+ case GDS_ADDR_BASE:
+ case SQ_CONST_MEM_BASE:
+ case SQ_ALU_CONST_CACHE_GS_0:
+ case SQ_ALU_CONST_CACHE_GS_1:
+ case SQ_ALU_CONST_CACHE_GS_2:
+ case SQ_ALU_CONST_CACHE_GS_3:
+ case SQ_ALU_CONST_CACHE_GS_4:
+ case SQ_ALU_CONST_CACHE_GS_5:
+ case SQ_ALU_CONST_CACHE_GS_6:
+ case SQ_ALU_CONST_CACHE_GS_7:
+ case SQ_ALU_CONST_CACHE_GS_8:
+ case SQ_ALU_CONST_CACHE_GS_9:
+ case SQ_ALU_CONST_CACHE_GS_10:
+ case SQ_ALU_CONST_CACHE_GS_11:
+ case SQ_ALU_CONST_CACHE_GS_12:
+ case SQ_ALU_CONST_CACHE_GS_13:
+ case SQ_ALU_CONST_CACHE_GS_14:
+ case SQ_ALU_CONST_CACHE_GS_15:
+ case SQ_ALU_CONST_CACHE_PS_0:
+ case SQ_ALU_CONST_CACHE_PS_1:
+ case SQ_ALU_CONST_CACHE_PS_2:
+ case SQ_ALU_CONST_CACHE_PS_3:
+ case SQ_ALU_CONST_CACHE_PS_4:
+ case SQ_ALU_CONST_CACHE_PS_5:
+ case SQ_ALU_CONST_CACHE_PS_6:
+ case SQ_ALU_CONST_CACHE_PS_7:
+ case SQ_ALU_CONST_CACHE_PS_8:
+ case SQ_ALU_CONST_CACHE_PS_9:
+ case SQ_ALU_CONST_CACHE_PS_10:
+ case SQ_ALU_CONST_CACHE_PS_11:
+ case SQ_ALU_CONST_CACHE_PS_12:
+ case SQ_ALU_CONST_CACHE_PS_13:
+ case SQ_ALU_CONST_CACHE_PS_14:
+ case SQ_ALU_CONST_CACHE_PS_15:
+ case SQ_ALU_CONST_CACHE_VS_0:
+ case SQ_ALU_CONST_CACHE_VS_1:
+ case SQ_ALU_CONST_CACHE_VS_2:
+ case SQ_ALU_CONST_CACHE_VS_3:
+ case SQ_ALU_CONST_CACHE_VS_4:
+ case SQ_ALU_CONST_CACHE_VS_5:
+ case SQ_ALU_CONST_CACHE_VS_6:
+ case SQ_ALU_CONST_CACHE_VS_7:
+ case SQ_ALU_CONST_CACHE_VS_8:
+ case SQ_ALU_CONST_CACHE_VS_9:
+ case SQ_ALU_CONST_CACHE_VS_10:
+ case SQ_ALU_CONST_CACHE_VS_11:
+ case SQ_ALU_CONST_CACHE_VS_12:
+ case SQ_ALU_CONST_CACHE_VS_13:
+ case SQ_ALU_CONST_CACHE_VS_14:
+ case SQ_ALU_CONST_CACHE_VS_15:
+ case SQ_ALU_CONST_CACHE_HS_0:
+ case SQ_ALU_CONST_CACHE_HS_1:
+ case SQ_ALU_CONST_CACHE_HS_2:
+ case SQ_ALU_CONST_CACHE_HS_3:
+ case SQ_ALU_CONST_CACHE_HS_4:
+ case SQ_ALU_CONST_CACHE_HS_5:
+ case SQ_ALU_CONST_CACHE_HS_6:
+ case SQ_ALU_CONST_CACHE_HS_7:
+ case SQ_ALU_CONST_CACHE_HS_8:
+ case SQ_ALU_CONST_CACHE_HS_9:
+ case SQ_ALU_CONST_CACHE_HS_10:
+ case SQ_ALU_CONST_CACHE_HS_11:
+ case SQ_ALU_CONST_CACHE_HS_12:
+ case SQ_ALU_CONST_CACHE_HS_13:
+ case SQ_ALU_CONST_CACHE_HS_14:
+ case SQ_ALU_CONST_CACHE_HS_15:
+ case SQ_ALU_CONST_CACHE_LS_0:
+ case SQ_ALU_CONST_CACHE_LS_1:
+ case SQ_ALU_CONST_CACHE_LS_2:
+ case SQ_ALU_CONST_CACHE_LS_3:
+ case SQ_ALU_CONST_CACHE_LS_4:
+ case SQ_ALU_CONST_CACHE_LS_5:
+ case SQ_ALU_CONST_CACHE_LS_6:
+ case SQ_ALU_CONST_CACHE_LS_7:
+ case SQ_ALU_CONST_CACHE_LS_8:
+ case SQ_ALU_CONST_CACHE_LS_9:
+ case SQ_ALU_CONST_CACHE_LS_10:
+ case SQ_ALU_CONST_CACHE_LS_11:
+ case SQ_ALU_CONST_CACHE_LS_12:
+ case SQ_ALU_CONST_CACHE_LS_13:
+ case SQ_ALU_CONST_CACHE_LS_14:
+ case SQ_ALU_CONST_CACHE_LS_15:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
+ default:
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ return 0;
+}
+
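To make the bitmap test above concrete, here is a worked example with values computed from the expressions in the function (illustrative only, not part of the patch):

    u32 reg = 0x8c64;                  /* SQ_VSTMP_RING_SIZE */
    u32 i   = reg >> 7;                /* 280: one bitmap word covers 32 registers (128 bytes) */
    u32 m   = 1 << ((reg >> 2) & 31);  /* bit 25 */
    /* the register is accepted immediately unless evergreen_reg_safe_bm[280]
     * has bit 25 set, in which case it falls through to the per-register switch */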
+/**
+ * evergreen_check_texture_resource() - check if a texture resource is valid
+ * @p: parser structure holding parsing context
+ * @idx: index into the cs buffer
+ * @texture: texture's bo structure
+ * @mipmap: mipmap's bo structure
+ *
+ * This function will check that the resource has valid fields and that
+ * the texture and mipmap bo objects are big enough to cover this resource.
+ */
+static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
+ struct radeon_bo *texture,
+ struct radeon_bo *mipmap)
+{
+ /* XXX fill in */
+ return 0;
+}
+
+static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ struct radeon_cs_reloc *reloc;
+ struct evergreen_cs_track *track;
+ volatile u32 *ib;
+ unsigned idx;
+ unsigned i;
+ unsigned start_reg, end_reg, reg;
+ int r;
+ u32 idx_value;
+
+ track = (struct evergreen_cs_track *)p->track;
+ ib = p->ib->ptr;
+ idx = pkt->idx + 1;
+ idx_value = radeon_get_ib_value(p, idx);
+
+ switch (pkt->opcode) {
+ case PACKET3_CONTEXT_CONTROL:
+ if (pkt->count != 1) {
+ DRM_ERROR("bad CONTEXT_CONTROL\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_INDEX_TYPE:
+ case PACKET3_NUM_INSTANCES:
+ case PACKET3_CLEAR_STATE:
+ if (pkt->count) {
+ DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_INDEX_BASE:
+ if (pkt->count != 1) {
+ DRM_ERROR("bad INDEX_BASE\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad INDEX_BASE\n");
+ return -EINVAL;
+ }
+ ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad DRAW_INDEX\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad DRAW_INDEX\n");
+ return -EINVAL;
+ }
+ ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_2:
+ if (pkt->count != 4) {
+ DRM_ERROR("bad DRAW_INDEX_2\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad DRAW_INDEX_2\n");
+ return -EINVAL;
+ }
+ ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_AUTO:
+ if (pkt->count != 1) {
+ DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_MULTI_AUTO:
+ if (pkt->count != 2) {
+ DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_IMMD:
+ if (pkt->count < 2) {
+ DRM_ERROR("bad DRAW_INDEX_IMMD\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_OFFSET:
+ if (pkt->count != 2) {
+ DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_OFFSET_2:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_WAIT_REG_MEM:
+ if (pkt->count != 5) {
+ DRM_ERROR("bad WAIT_REG_MEM\n");
+ return -EINVAL;
+ }
+ /* bit 4 is reg (0) or mem (1) */
+ if (idx_value & 0x10) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad WAIT_REG_MEM\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ }
+ break;
+ case PACKET3_SURFACE_SYNC:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad SURFACE_SYNC\n");
+ return -EINVAL;
+ }
+ /* 0xffffffff/0x0 is flush all cache flag */
+ if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
+ radeon_get_ib_value(p, idx + 2) != 0) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SURFACE_SYNC\n");
+ return -EINVAL;
+ }
+ ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ }
+ break;
+ case PACKET3_EVENT_WRITE:
+ if (pkt->count != 2 && pkt->count != 0) {
+ DRM_ERROR("bad EVENT_WRITE\n");
+ return -EINVAL;
+ }
+ if (pkt->count) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad EVENT_WRITE\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ }
+ break;
+ case PACKET3_EVENT_WRITE_EOP:
+ if (pkt->count != 4) {
+ DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ break;
+ case PACKET3_EVENT_WRITE_EOS:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad EVENT_WRITE_EOS\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad EVENT_WRITE_EOS\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ break;
+ case PACKET3_SET_CONFIG_REG:
+ start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+ (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+ (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+ DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ r = evergreen_cs_check_reg(p, reg, idx+1+i);
+ if (r)
+ return r;
+ }
+ break;
+ case PACKET3_SET_CONTEXT_REG:
+ start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
+ (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
+ (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
+ DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ r = evergreen_cs_check_reg(p, reg, idx+1+i);
+ if (r)
+ return r;
+ }
+ break;
+ case PACKET3_SET_RESOURCE:
+ if (pkt->count % 8) {
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
+ start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_RESOURCE_START) ||
+ (start_reg >= PACKET3_SET_RESOURCE_END) ||
+ (end_reg >= PACKET3_SET_RESOURCE_END)) {
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < (pkt->count / 8); i++) {
+ struct radeon_bo *texture, *mipmap;
+ u32 size, offset;
+
+ switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
+ case SQ_TEX_VTX_VALID_TEXTURE:
+ /* tex base */
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET_RESOURCE (tex)\n");
+ return -EINVAL;
+ }
+ ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ texture = reloc->robj;
+ /* tex mip base */
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET_RESOURCE (tex)\n");
+ return -EINVAL;
+ }
+ ib[idx+1+(i*8)+4] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ mipmap = reloc->robj;
+ r = evergreen_check_texture_resource(p, idx+1+(i*8),
+ texture, mipmap);
+ if (r)
+ return r;
+ break;
+ case SQ_TEX_VTX_VALID_BUFFER:
+ /* vtx base */
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET_RESOURCE (vtx)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
+ size = radeon_get_ib_value(p, idx+1+(i*8)+1);
+ if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
+ /* force size to size of the buffer */
+ dev_warn(p->dev, "vbo resource seems too big for the bo\n");
+ ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
+ }
+ ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
+ ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ break;
+ case SQ_TEX_VTX_INVALID_TEXTURE:
+ case SQ_TEX_VTX_INVALID_BUFFER:
+ default:
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
+ }
+ break;
+ case PACKET3_SET_ALU_CONST:
+ /* XXX fix me ALU const buffers only */
+ break;
+ case PACKET3_SET_BOOL_CONST:
+ start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
+ (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
+ (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
+ DRM_ERROR("bad SET_BOOL_CONST\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_LOOP_CONST:
+ start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
+ (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
+ (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
+ DRM_ERROR("bad SET_LOOP_CONST\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_CTL_CONST:
+ start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
+ (start_reg >= PACKET3_SET_CTL_CONST_END) ||
+ (end_reg >= PACKET3_SET_CTL_CONST_END)) {
+ DRM_ERROR("bad SET_CTL_CONST\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_SAMPLER:
+ if (pkt->count % 3) {
+ DRM_ERROR("bad SET_SAMPLER\n");
+ return -EINVAL;
+ }
+ start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_SAMPLER_START) ||
+ (start_reg >= PACKET3_SET_SAMPLER_END) ||
+ (end_reg >= PACKET3_SET_SAMPLER_END)) {
+ DRM_ERROR("bad SET_SAMPLER\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_NOP:
+ break;
+ default:
+ DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int evergreen_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet pkt;
+ struct evergreen_cs_track *track;
+ int r;
+
+ if (p->track == NULL) {
+ /* initialize tracker, we are in kms */
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (track == NULL)
+ return -ENOMEM;
+ evergreen_cs_track_init(track);
+ track->npipes = p->rdev->config.evergreen.tiling_npipes;
+ track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
+ track->group_size = p->rdev->config.evergreen.tiling_group_size;
+ p->track = track;
+ }
+ do {
+ r = evergreen_cs_packet_parse(p, &pkt, p->idx);
+ if (r) {
+ kfree(p->track);
+ p->track = NULL;
+ return r;
+ }
+ p->idx += pkt.count + 2;
+ switch (pkt.type) {
+ case PACKET_TYPE0:
+ r = evergreen_cs_parse_packet0(p, &pkt);
+ break;
+ case PACKET_TYPE2:
+ break;
+ case PACKET_TYPE3:
+ r = evergreen_packet3_check(p, &pkt);
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ kfree(p->track);
+ p->track = NULL;
+ return -EINVAL;
+ }
+ if (r) {
+ kfree(p->track);
+ p->track = NULL;
+ return r;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+ for (r = 0; r < p->ib->length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
+ mdelay(1);
+ }
+#endif
+ kfree(p->track);
+ p->track = NULL;
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index af86af836f13..e028c1cd9d9b 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -151,6 +151,9 @@
#define EVERGREEN_DATA_FORMAT 0x6b00
# define EVERGREEN_INTERLEAVE_EN (1 << 0)
#define EVERGREEN_DESKTOP_HEIGHT 0x6b04
+#define EVERGREEN_VLINE_START_END 0x6b08
+#define EVERGREEN_VLINE_STATUS 0x6bb8
+# define EVERGREEN_VLINE_STAT (1 << 12)
#define EVERGREEN_VIEWPORT_START 0x6d70
#define EVERGREEN_VIEWPORT_SIZE 0x6d74
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 93e9e17ad54a..79683f6b4452 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -218,6 +218,8 @@
#define CLIP_VTX_REORDER_ENA (1 << 0)
#define NUM_CLIP_SEQ(x) ((x) << 1)
#define PA_SC_AA_CONFIG 0x28C04
+#define MSAA_NUM_SAMPLES_SHIFT 0
+#define MSAA_NUM_SAMPLES_MASK 0x3
#define PA_SC_CLIPRECT_RULE 0x2820C
#define PA_SC_EDGERULE 0x28230
#define PA_SC_FIFO_SIZE 0x8BCC
@@ -553,4 +555,466 @@
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
+/*
+ * PM4
+ */
+#define PACKET_TYPE0 0
+#define PACKET_TYPE1 1
+#define PACKET_TYPE2 2
+#define PACKET_TYPE3 3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+ (((reg) >> 2) & 0xFFFF) | \
+ ((n) & 0x3FFF) << 16)
+#define CP_PACKET2 0x80000000
+#define PACKET2_PAD_SHIFT 0
+#define PACKET2_PAD_MASK (0x3fffffff << 0)
+
+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
+
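These are the helpers used throughout evergreen_cs.c above; for instance, the headers userspace emits at the start of the VLINE wait sequence would be built as follows (a hypothetical usage, not from the patch):

    uint32_t p0 = PACKET0(EVERGREEN_VLINE_START_END, 0);  /* 0x6b08 >> 2 == 0x00001ac2 */
    uint32_t p3 = PACKET3(PACKET3_WAIT_REG_MEM, 5);       /* type 3, opcode 0x3C, count 5 */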
+/* Packet 3 types */
+#define PACKET3_NOP 0x10
+#define PACKET3_SET_BASE 0x11
+#define PACKET3_CLEAR_STATE 0x12
+#define PACKET3_INDIRECT_BUFFER_SIZE 0x13
+#define PACKET3_DISPATCH_DIRECT 0x15
+#define PACKET3_DISPATCH_INDIRECT 0x16
+#define PACKET3_INDIRECT_BUFFER_END 0x17
+#define PACKET3_SET_PREDICATION 0x20
+#define PACKET3_REG_RMW 0x21
+#define PACKET3_COND_EXEC 0x22
+#define PACKET3_PRED_EXEC 0x23
+#define PACKET3_DRAW_INDIRECT 0x24
+#define PACKET3_DRAW_INDEX_INDIRECT 0x25
+#define PACKET3_INDEX_BASE 0x26
+#define PACKET3_DRAW_INDEX_2 0x27
+#define PACKET3_CONTEXT_CONTROL 0x28
+#define PACKET3_DRAW_INDEX_OFFSET 0x29
+#define PACKET3_INDEX_TYPE 0x2A
+#define PACKET3_DRAW_INDEX 0x2B
+#define PACKET3_DRAW_INDEX_AUTO 0x2D
+#define PACKET3_DRAW_INDEX_IMMD 0x2E
+#define PACKET3_NUM_INSTANCES 0x2F
+#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
+#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
+#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
+#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
+#define PACKET3_MEM_SEMAPHORE 0x39
+#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_WAIT_REG_MEM 0x3C
+#define PACKET3_MEM_WRITE 0x3D
+#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_SURFACE_SYNC 0x43
+# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
+# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
+# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
+# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
+# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
+# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
+# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
+# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
+# define PACKET3_CB8_DEST_BASE_ENA (1 << 15)
+# define PACKET3_CB9_DEST_BASE_ENA (1 << 16)
+# define PACKET3_CB10_DEST_BASE_ENA (1 << 17)
+# define PACKET3_CB11_DEST_BASE_ENA (1 << 18)
+# define PACKET3_FULL_CACHE_ENA (1 << 20)
+# define PACKET3_TC_ACTION_ENA (1 << 23)
+# define PACKET3_VC_ACTION_ENA (1 << 24)
+# define PACKET3_CB_ACTION_ENA (1 << 25)
+# define PACKET3_DB_ACTION_ENA (1 << 26)
+# define PACKET3_SH_ACTION_ENA (1 << 27)
+# define PACKET3_SMX_ACTION_ENA (1 << 28)
+#define PACKET3_ME_INITIALIZE 0x44
+#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define PACKET3_COND_WRITE 0x45
+#define PACKET3_EVENT_WRITE 0x46
+#define PACKET3_EVENT_WRITE_EOP 0x47
+#define PACKET3_EVENT_WRITE_EOS 0x48
+#define PACKET3_PREAMBLE_CNTL 0x4A
+#define PACKET3_RB_OFFSET 0x4B
+#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C
+#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D
+#define PACKET3_ALU_PS_CONST_UPDATE 0x4E
+#define PACKET3_ALU_VS_CONST_UPDATE 0x4F
+#define PACKET3_ONE_REG_WRITE 0x57
+#define PACKET3_SET_CONFIG_REG 0x68
+#define PACKET3_SET_CONFIG_REG_START 0x00008000
+#define PACKET3_SET_CONFIG_REG_END 0x0000ac00
+#define PACKET3_SET_CONTEXT_REG 0x69
+#define PACKET3_SET_CONTEXT_REG_START 0x00028000
+#define PACKET3_SET_CONTEXT_REG_END 0x00029000
+#define PACKET3_SET_ALU_CONST 0x6A
+/* alu const buffers only; no reg file */
+#define PACKET3_SET_BOOL_CONST 0x6B
+#define PACKET3_SET_BOOL_CONST_START 0x0003a500
+#define PACKET3_SET_BOOL_CONST_END 0x0003a518
+#define PACKET3_SET_LOOP_CONST 0x6C
+#define PACKET3_SET_LOOP_CONST_START 0x0003a200
+#define PACKET3_SET_LOOP_CONST_END 0x0003a500
+#define PACKET3_SET_RESOURCE 0x6D
+#define PACKET3_SET_RESOURCE_START 0x00030000
+#define PACKET3_SET_RESOURCE_END 0x00038000
+#define PACKET3_SET_SAMPLER 0x6E
+#define PACKET3_SET_SAMPLER_START 0x0003c000
+#define PACKET3_SET_SAMPLER_END 0x0003c600
+#define PACKET3_SET_CTL_CONST 0x6F
+#define PACKET3_SET_CTL_CONST_START 0x0003cff0
+#define PACKET3_SET_CTL_CONST_END 0x0003ff0c
+#define PACKET3_SET_RESOURCE_OFFSET 0x70
+#define PACKET3_SET_ALU_CONST_VS 0x71
+#define PACKET3_SET_ALU_CONST_DI 0x72
+#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
+#define PACKET3_SET_RESOURCE_INDIRECT 0x74
+#define PACKET3_SET_APPEND_CNT 0x75
+
+#define SQ_RESOURCE_CONSTANT_WORD7_0 0x3001c
+#define S__SQ_CONSTANT_TYPE(x) (((x) & 3) << 30)
+#define G__SQ_CONSTANT_TYPE(x) (((x) >> 30) & 3)
+#define SQ_TEX_VTX_INVALID_TEXTURE 0x0
+#define SQ_TEX_VTX_INVALID_BUFFER 0x1
+#define SQ_TEX_VTX_VALID_TEXTURE 0x2
+#define SQ_TEX_VTX_VALID_BUFFER 0x3
+
+#define SQ_CONST_MEM_BASE 0x8df8
+
+#define SQ_ESGS_RING_SIZE 0x8c44
+#define SQ_GSVS_RING_SIZE 0x8c4c
+#define SQ_ESTMP_RING_SIZE 0x8c54
+#define SQ_GSTMP_RING_SIZE 0x8c5c
+#define SQ_VSTMP_RING_SIZE 0x8c64
+#define SQ_PSTMP_RING_SIZE 0x8c6c
+#define SQ_LSTMP_RING_SIZE 0x8e14
+#define SQ_HSTMP_RING_SIZE 0x8e1c
+#define VGT_TF_RING_SIZE 0x8988
+
+#define SQ_ESGS_RING_ITEMSIZE 0x28900
+#define SQ_GSVS_RING_ITEMSIZE 0x28904
+#define SQ_ESTMP_RING_ITEMSIZE 0x28908
+#define SQ_GSTMP_RING_ITEMSIZE 0x2890c
+#define SQ_VSTMP_RING_ITEMSIZE 0x28910
+#define SQ_PSTMP_RING_ITEMSIZE 0x28914
+#define SQ_LSTMP_RING_ITEMSIZE 0x28830
+#define SQ_HSTMP_RING_ITEMSIZE 0x28834
+
+#define SQ_GS_VERT_ITEMSIZE 0x2891c
+#define SQ_GS_VERT_ITEMSIZE_1 0x28920
+#define SQ_GS_VERT_ITEMSIZE_2 0x28924
+#define SQ_GS_VERT_ITEMSIZE_3 0x28928
+#define SQ_GSVS_RING_OFFSET_1 0x2892c
+#define SQ_GSVS_RING_OFFSET_2 0x28930
+#define SQ_GSVS_RING_OFFSET_3 0x28934
+
+#define SQ_ALU_CONST_CACHE_PS_0 0x28940
+#define SQ_ALU_CONST_CACHE_PS_1 0x28944
+#define SQ_ALU_CONST_CACHE_PS_2 0x28948
+#define SQ_ALU_CONST_CACHE_PS_3 0x2894c
+#define SQ_ALU_CONST_CACHE_PS_4 0x28950
+#define SQ_ALU_CONST_CACHE_PS_5 0x28954
+#define SQ_ALU_CONST_CACHE_PS_6 0x28958
+#define SQ_ALU_CONST_CACHE_PS_7 0x2895c
+#define SQ_ALU_CONST_CACHE_PS_8 0x28960
+#define SQ_ALU_CONST_CACHE_PS_9 0x28964
+#define SQ_ALU_CONST_CACHE_PS_10 0x28968
+#define SQ_ALU_CONST_CACHE_PS_11 0x2896c
+#define SQ_ALU_CONST_CACHE_PS_12 0x28970
+#define SQ_ALU_CONST_CACHE_PS_13 0x28974
+#define SQ_ALU_CONST_CACHE_PS_14 0x28978
+#define SQ_ALU_CONST_CACHE_PS_15 0x2897c
+#define SQ_ALU_CONST_CACHE_VS_0 0x28980
+#define SQ_ALU_CONST_CACHE_VS_1 0x28984
+#define SQ_ALU_CONST_CACHE_VS_2 0x28988
+#define SQ_ALU_CONST_CACHE_VS_3 0x2898c
+#define SQ_ALU_CONST_CACHE_VS_4 0x28990
+#define SQ_ALU_CONST_CACHE_VS_5 0x28994
+#define SQ_ALU_CONST_CACHE_VS_6 0x28998
+#define SQ_ALU_CONST_CACHE_VS_7 0x2899c
+#define SQ_ALU_CONST_CACHE_VS_8 0x289a0
+#define SQ_ALU_CONST_CACHE_VS_9 0x289a4
+#define SQ_ALU_CONST_CACHE_VS_10 0x289a8
+#define SQ_ALU_CONST_CACHE_VS_11 0x289ac
+#define SQ_ALU_CONST_CACHE_VS_12 0x289b0
+#define SQ_ALU_CONST_CACHE_VS_13 0x289b4
+#define SQ_ALU_CONST_CACHE_VS_14 0x289b8
+#define SQ_ALU_CONST_CACHE_VS_15 0x289bc
+#define SQ_ALU_CONST_CACHE_GS_0 0x289c0
+#define SQ_ALU_CONST_CACHE_GS_1 0x289c4
+#define SQ_ALU_CONST_CACHE_GS_2 0x289c8
+#define SQ_ALU_CONST_CACHE_GS_3 0x289cc
+#define SQ_ALU_CONST_CACHE_GS_4 0x289d0
+#define SQ_ALU_CONST_CACHE_GS_5 0x289d4
+#define SQ_ALU_CONST_CACHE_GS_6 0x289d8
+#define SQ_ALU_CONST_CACHE_GS_7 0x289dc
+#define SQ_ALU_CONST_CACHE_GS_8 0x289e0
+#define SQ_ALU_CONST_CACHE_GS_9 0x289e4
+#define SQ_ALU_CONST_CACHE_GS_10 0x289e8
+#define SQ_ALU_CONST_CACHE_GS_11 0x289ec
+#define SQ_ALU_CONST_CACHE_GS_12 0x289f0
+#define SQ_ALU_CONST_CACHE_GS_13 0x289f4
+#define SQ_ALU_CONST_CACHE_GS_14 0x289f8
+#define SQ_ALU_CONST_CACHE_GS_15 0x289fc
+#define SQ_ALU_CONST_CACHE_HS_0 0x28f00
+#define SQ_ALU_CONST_CACHE_HS_1 0x28f04
+#define SQ_ALU_CONST_CACHE_HS_2 0x28f08
+#define SQ_ALU_CONST_CACHE_HS_3 0x28f0c
+#define SQ_ALU_CONST_CACHE_HS_4 0x28f10
+#define SQ_ALU_CONST_CACHE_HS_5 0x28f14
+#define SQ_ALU_CONST_CACHE_HS_6 0x28f18
+#define SQ_ALU_CONST_CACHE_HS_7 0x28f1c
+#define SQ_ALU_CONST_CACHE_HS_8 0x28f20
+#define SQ_ALU_CONST_CACHE_HS_9 0x28f24
+#define SQ_ALU_CONST_CACHE_HS_10 0x28f28
+#define SQ_ALU_CONST_CACHE_HS_11 0x28f2c
+#define SQ_ALU_CONST_CACHE_HS_12 0x28f30
+#define SQ_ALU_CONST_CACHE_HS_13 0x28f34
+#define SQ_ALU_CONST_CACHE_HS_14 0x28f38
+#define SQ_ALU_CONST_CACHE_HS_15 0x28f3c
+#define SQ_ALU_CONST_CACHE_LS_0 0x28f40
+#define SQ_ALU_CONST_CACHE_LS_1 0x28f44
+#define SQ_ALU_CONST_CACHE_LS_2 0x28f48
+#define SQ_ALU_CONST_CACHE_LS_3 0x28f4c
+#define SQ_ALU_CONST_CACHE_LS_4 0x28f50
+#define SQ_ALU_CONST_CACHE_LS_5 0x28f54
+#define SQ_ALU_CONST_CACHE_LS_6 0x28f58
+#define SQ_ALU_CONST_CACHE_LS_7 0x28f5c
+#define SQ_ALU_CONST_CACHE_LS_8 0x28f60
+#define SQ_ALU_CONST_CACHE_LS_9 0x28f64
+#define SQ_ALU_CONST_CACHE_LS_10 0x28f68
+#define SQ_ALU_CONST_CACHE_LS_11 0x28f6c
+#define SQ_ALU_CONST_CACHE_LS_12 0x28f70
+#define SQ_ALU_CONST_CACHE_LS_13 0x28f74
+#define SQ_ALU_CONST_CACHE_LS_14 0x28f78
+#define SQ_ALU_CONST_CACHE_LS_15 0x28f7c
+
+#define DB_DEPTH_CONTROL 0x28800
+#define DB_DEPTH_VIEW 0x28008
+#define DB_HTILE_DATA_BASE 0x28014
+#define DB_Z_INFO 0x28040
+# define Z_ARRAY_MODE(x) ((x) << 4)
+#define DB_STENCIL_INFO 0x28044
+#define DB_Z_READ_BASE 0x28048
+#define DB_STENCIL_READ_BASE 0x2804c
+#define DB_Z_WRITE_BASE 0x28050
+#define DB_STENCIL_WRITE_BASE 0x28054
+#define DB_DEPTH_SIZE 0x28058
+
+#define SQ_PGM_START_PS 0x28840
+#define SQ_PGM_START_VS 0x2885c
+#define SQ_PGM_START_GS 0x28874
+#define SQ_PGM_START_ES 0x2888c
+#define SQ_PGM_START_FS 0x288a4
+#define SQ_PGM_START_HS 0x288b8
+#define SQ_PGM_START_LS 0x288d0
+
+#define VGT_STRMOUT_CONFIG 0x28b94
+#define VGT_STRMOUT_BUFFER_CONFIG 0x28b98
+
+#define CB_TARGET_MASK 0x28238
+#define CB_SHADER_MASK 0x2823c
+
+#define GDS_ADDR_BASE 0x28720
+
+#define CB_IMMED0_BASE 0x28b9c
+#define CB_IMMED1_BASE 0x28ba0
+#define CB_IMMED2_BASE 0x28ba4
+#define CB_IMMED3_BASE 0x28ba8
+#define CB_IMMED4_BASE 0x28bac
+#define CB_IMMED5_BASE 0x28bb0
+#define CB_IMMED6_BASE 0x28bb4
+#define CB_IMMED7_BASE 0x28bb8
+#define CB_IMMED8_BASE 0x28bbc
+#define CB_IMMED9_BASE 0x28bc0
+#define CB_IMMED10_BASE 0x28bc4
+#define CB_IMMED11_BASE 0x28bc8
+
+/* all 12 CB blocks have these regs */
+#define CB_COLOR0_BASE 0x28c60
+#define CB_COLOR0_PITCH 0x28c64
+#define CB_COLOR0_SLICE 0x28c68
+#define CB_COLOR0_VIEW 0x28c6c
+#define CB_COLOR0_INFO 0x28c70
+# define CB_ARRAY_MODE(x) ((x) << 8)
+# define ARRAY_LINEAR_GENERAL 0
+# define ARRAY_LINEAR_ALIGNED 1
+# define ARRAY_1D_TILED_THIN1 2
+# define ARRAY_2D_TILED_THIN1 4
+#define CB_COLOR0_ATTRIB 0x28c74
+#define CB_COLOR0_DIM 0x28c78
+/* only CB0-7 blocks have these regs */
+#define CB_COLOR0_CMASK 0x28c7c
+#define CB_COLOR0_CMASK_SLICE 0x28c80
+#define CB_COLOR0_FMASK 0x28c84
+#define CB_COLOR0_FMASK_SLICE 0x28c88
+#define CB_COLOR0_CLEAR_WORD0 0x28c8c
+#define CB_COLOR0_CLEAR_WORD1 0x28c90
+#define CB_COLOR0_CLEAR_WORD2 0x28c94
+#define CB_COLOR0_CLEAR_WORD3 0x28c98
+
+#define CB_COLOR1_BASE 0x28c9c
+#define CB_COLOR2_BASE 0x28cd8
+#define CB_COLOR3_BASE 0x28d14
+#define CB_COLOR4_BASE 0x28d50
+#define CB_COLOR5_BASE 0x28d8c
+#define CB_COLOR6_BASE 0x28dc8
+#define CB_COLOR7_BASE 0x28e04
+#define CB_COLOR8_BASE 0x28e40
+#define CB_COLOR9_BASE 0x28e5c
+#define CB_COLOR10_BASE 0x28e78
+#define CB_COLOR11_BASE 0x28e94
+
+#define CB_COLOR1_PITCH 0x28ca0
+#define CB_COLOR2_PITCH 0x28cdc
+#define CB_COLOR3_PITCH 0x28d18
+#define CB_COLOR4_PITCH 0x28d54
+#define CB_COLOR5_PITCH 0x28d90
+#define CB_COLOR6_PITCH 0x28dcc
+#define CB_COLOR7_PITCH 0x28e08
+#define CB_COLOR8_PITCH 0x28e44
+#define CB_COLOR9_PITCH 0x28e60
+#define CB_COLOR10_PITCH 0x28e7c
+#define CB_COLOR11_PITCH 0x28e98
+
+#define CB_COLOR1_SLICE 0x28ca4
+#define CB_COLOR2_SLICE 0x28ce0
+#define CB_COLOR3_SLICE 0x28d1c
+#define CB_COLOR4_SLICE 0x28d58
+#define CB_COLOR5_SLICE 0x28d94
+#define CB_COLOR6_SLICE 0x28dd0
+#define CB_COLOR7_SLICE 0x28e0c
+#define CB_COLOR8_SLICE 0x28e48
+#define CB_COLOR9_SLICE 0x28e64
+#define CB_COLOR10_SLICE 0x28e80
+#define CB_COLOR11_SLICE 0x28e9c
+
+#define CB_COLOR1_VIEW 0x28ca8
+#define CB_COLOR2_VIEW 0x28ce4
+#define CB_COLOR3_VIEW 0x28d20
+#define CB_COLOR4_VIEW 0x28d5c
+#define CB_COLOR5_VIEW 0x28d98
+#define CB_COLOR6_VIEW 0x28dd4
+#define CB_COLOR7_VIEW 0x28e10
+#define CB_COLOR8_VIEW 0x28e4c
+#define CB_COLOR9_VIEW 0x28e68
+#define CB_COLOR10_VIEW 0x28e84
+#define CB_COLOR11_VIEW 0x28ea0
+
+#define CB_COLOR1_INFO 0x28cac
+#define CB_COLOR2_INFO 0x28ce8
+#define CB_COLOR3_INFO 0x28d24
+#define CB_COLOR4_INFO 0x28d60
+#define CB_COLOR5_INFO 0x28d9c
+#define CB_COLOR6_INFO 0x28dd8
+#define CB_COLOR7_INFO 0x28e14
+#define CB_COLOR8_INFO 0x28e50
+#define CB_COLOR9_INFO 0x28e6c
+#define CB_COLOR10_INFO 0x28e88
+#define CB_COLOR11_INFO 0x28ea4
+
+#define CB_COLOR1_ATTRIB 0x28cb0
+#define CB_COLOR2_ATTRIB 0x28cec
+#define CB_COLOR3_ATTRIB 0x28d28
+#define CB_COLOR4_ATTRIB 0x28d64
+#define CB_COLOR5_ATTRIB 0x28da0
+#define CB_COLOR6_ATTRIB 0x28ddc
+#define CB_COLOR7_ATTRIB 0x28e18
+#define CB_COLOR8_ATTRIB 0x28e54
+#define CB_COLOR9_ATTRIB 0x28e70
+#define CB_COLOR10_ATTRIB 0x28e8c
+#define CB_COLOR11_ATTRIB 0x28ea8
+
+#define CB_COLOR1_DIM 0x28cb4
+#define CB_COLOR2_DIM 0x28cf0
+#define CB_COLOR3_DIM 0x28d2c
+#define CB_COLOR4_DIM 0x28d68
+#define CB_COLOR5_DIM 0x28da4
+#define CB_COLOR6_DIM 0x28de0
+#define CB_COLOR7_DIM 0x28e1c
+#define CB_COLOR8_DIM 0x28e58
+#define CB_COLOR9_DIM 0x28e74
+#define CB_COLOR10_DIM 0x28e90
+#define CB_COLOR11_DIM 0x28eac
+
+#define CB_COLOR1_CMASK 0x28cb8
+#define CB_COLOR2_CMASK 0x28cf4
+#define CB_COLOR3_CMASK 0x28d30
+#define CB_COLOR4_CMASK 0x28d6c
+#define CB_COLOR5_CMASK 0x28da8
+#define CB_COLOR6_CMASK 0x28de4
+#define CB_COLOR7_CMASK 0x28e20
+
+#define CB_COLOR1_CMASK_SLICE 0x28cbc
+#define CB_COLOR2_CMASK_SLICE 0x28cf8
+#define CB_COLOR3_CMASK_SLICE 0x28d34
+#define CB_COLOR4_CMASK_SLICE 0x28d70
+#define CB_COLOR5_CMASK_SLICE 0x28dac
+#define CB_COLOR6_CMASK_SLICE 0x28de8
+#define CB_COLOR7_CMASK_SLICE 0x28e24
+
+#define CB_COLOR1_FMASK 0x28cc0
+#define CB_COLOR2_FMASK 0x28cfc
+#define CB_COLOR3_FMASK 0x28d38
+#define CB_COLOR4_FMASK 0x28d74
+#define CB_COLOR5_FMASK 0x28db0
+#define CB_COLOR6_FMASK 0x28dec
+#define CB_COLOR7_FMASK 0x28e28
+
+#define CB_COLOR1_FMASK_SLICE 0x28cc4
+#define CB_COLOR2_FMASK_SLICE 0x28d00
+#define CB_COLOR3_FMASK_SLICE 0x28d3c
+#define CB_COLOR4_FMASK_SLICE 0x28d78
+#define CB_COLOR5_FMASK_SLICE 0x28db4
+#define CB_COLOR6_FMASK_SLICE 0x28df0
+#define CB_COLOR7_FMASK_SLICE 0x28e2c
+
+#define CB_COLOR1_CLEAR_WORD0 0x28cc8
+#define CB_COLOR2_CLEAR_WORD0 0x28d04
+#define CB_COLOR3_CLEAR_WORD0 0x28d40
+#define CB_COLOR4_CLEAR_WORD0 0x28d7c
+#define CB_COLOR5_CLEAR_WORD0 0x28db8
+#define CB_COLOR6_CLEAR_WORD0 0x28df4
+#define CB_COLOR7_CLEAR_WORD0 0x28e30
+
+#define CB_COLOR1_CLEAR_WORD1 0x28ccc
+#define CB_COLOR2_CLEAR_WORD1 0x28d08
+#define CB_COLOR3_CLEAR_WORD1 0x28d44
+#define CB_COLOR4_CLEAR_WORD1 0x28d80
+#define CB_COLOR5_CLEAR_WORD1 0x28dbc
+#define CB_COLOR6_CLEAR_WORD1 0x28df8
+#define CB_COLOR7_CLEAR_WORD1 0x28e34
+
+#define CB_COLOR1_CLEAR_WORD2 0x28cd0
+#define CB_COLOR2_CLEAR_WORD2 0x28d0c
+#define CB_COLOR3_CLEAR_WORD2 0x28d48
+#define CB_COLOR4_CLEAR_WORD2 0x28d84
+#define CB_COLOR5_CLEAR_WORD2 0x28dc0
+#define CB_COLOR6_CLEAR_WORD2 0x28dfc
+#define CB_COLOR7_CLEAR_WORD2 0x28e38
+
+#define CB_COLOR1_CLEAR_WORD3 0x28cd4
+#define CB_COLOR2_CLEAR_WORD3 0x28d10
+#define CB_COLOR3_CLEAR_WORD3 0x28d4c
+#define CB_COLOR4_CLEAR_WORD3 0x28d88
+#define CB_COLOR5_CLEAR_WORD3 0x28dc4
+#define CB_COLOR6_CLEAR_WORD3 0x28e00
+#define CB_COLOR7_CLEAR_WORD3 0x28e3c
+
+#define SQ_TEX_RESOURCE_WORD0_0 0x30000
+#define SQ_TEX_RESOURCE_WORD1_0 0x30004
+# define TEX_ARRAY_MODE(x) ((x) << 28)
+#define SQ_TEX_RESOURCE_WORD2_0 0x30008
+#define SQ_TEX_RESOURCE_WORD3_0 0x3000C
+#define SQ_TEX_RESOURCE_WORD4_0 0x30010
+#define SQ_TEX_RESOURCE_WORD5_0 0x30014
+#define SQ_TEX_RESOURCE_WORD6_0 0x30018
+#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
+
+
#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index cc004b05d63e..cf89aa2eb28c 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -162,6 +162,11 @@ void r100_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
@@ -172,6 +177,11 @@ void r100_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4415a5ee5871..e6c89142bb4d 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -45,9 +45,14 @@ void r420_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
/* low sh */
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
@@ -58,6 +63,11 @@ void r420_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 44e96a2ae25a..0e91871f45be 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -291,6 +291,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
@@ -301,6 +306,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
@@ -317,6 +327,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
@@ -327,6 +342,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
@@ -343,6 +363,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
@@ -353,6 +378,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
@@ -375,6 +405,11 @@ void r600_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
@@ -385,6 +420,11 @@ void r600_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
@@ -401,7 +441,12 @@ void r600_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
@@ -411,7 +456,12 @@ void r600_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
@@ -430,14 +480,30 @@ void r600_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
} else {
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ }
+ /* mid sh */
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
}
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
@@ -453,14 +519,30 @@ void r600_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
} else {
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ }
+ /* mid mh */
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
}
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
@@ -475,7 +557,18 @@ void r600_pm_init_profile(struct radeon_device *rdev)
void r600_pm_misc(struct radeon_device *rdev)
{
+ int req_ps_idx = rdev->pm.requested_power_state_index;
+ int req_cm_idx = rdev->pm.requested_clock_mode_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
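+ /* only software-controlled (VOLTAGE_SW) VDDC is handled here; skip the atom call if the level is unchanged */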
+ if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+ if (voltage->voltage != rdev->pm.current_vddc) {
+ radeon_atom_set_voltage(rdev, voltage->voltage);
+ rdev->pm.current_vddc = voltage->voltage;
+ DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+ }
+ }
}
bool r600_gui_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 66a37fb75839..8e1d44ca26ec 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -176,6 +176,7 @@ void radeon_pm_suspend(struct radeon_device *rdev);
void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
/*
* Fences.
@@ -576,6 +577,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
*/
int radeon_agp_init(struct radeon_device *rdev);
void radeon_agp_resume(struct radeon_device *rdev);
+void radeon_agp_suspend(struct radeon_device *rdev);
void radeon_agp_fini(struct radeon_device *rdev);
@@ -646,15 +648,18 @@ enum radeon_pm_profile_type {
PM_PROFILE_DEFAULT,
PM_PROFILE_AUTO,
PM_PROFILE_LOW,
+ PM_PROFILE_MID,
PM_PROFILE_HIGH,
};
#define PM_PROFILE_DEFAULT_IDX 0
#define PM_PROFILE_LOW_SH_IDX 1
-#define PM_PROFILE_HIGH_SH_IDX 2
-#define PM_PROFILE_LOW_MH_IDX 3
-#define PM_PROFILE_HIGH_MH_IDX 4
-#define PM_PROFILE_MAX 5
+#define PM_PROFILE_MID_SH_IDX 2
+#define PM_PROFILE_HIGH_SH_IDX 3
+#define PM_PROFILE_LOW_MH_IDX 4
+#define PM_PROFILE_MID_MH_IDX 5
+#define PM_PROFILE_HIGH_MH_IDX 6
+#define PM_PROFILE_MAX 7
struct radeon_pm_profile {
int dpms_off_ps_idx;
@@ -743,6 +748,7 @@ struct radeon_pm {
int default_power_state_index;
u32 current_sclk;
u32 current_mclk;
+ u32 current_vddc;
struct radeon_i2c_chan *i2c_bus;
/* selected pm method */
enum radeon_pm_method pm_method;
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 28e473f1f56f..f40dfb77f9b1 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -270,3 +270,8 @@ void radeon_agp_fini(struct radeon_device *rdev)
}
#endif
}
+
+void radeon_agp_suspend(struct radeon_device *rdev)
+{
+ radeon_agp_fini(rdev);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e57df08d4aeb..87f7e2cc52d4 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -724,8 +724,8 @@ static struct radeon_asic evergreen_asic = {
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
- .fence_ring_emit = NULL,
- .cs_parse = NULL,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
.copy_blit = NULL,
.copy_dma = NULL,
.copy = NULL,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5c40a3dfaca2..c0bbaa64157a 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -314,6 +314,7 @@ void evergreen_hpd_set_polarity(struct radeon_device *rdev,
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
int evergreen_irq_set(struct radeon_device *rdev);
int evergreen_irq_process(struct radeon_device *rdev);
+extern int evergreen_cs_parse(struct radeon_cs_parser *p);
extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 6e733fdc3349..99bd8a9c56b3 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -680,11 +680,19 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
uint8_t dac;
union atom_supported_devices *supported_devices;
int i, j, max_device;
- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
+ struct bios_connector *bios_connectors;
+ size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
- if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+ bios_connectors = kzalloc(bc_size, GFP_KERNEL);
+ if (!bios_connectors)
return false;
+ if (!atom_parse_data_header(ctx, index, &size, &frev, &crev,
+ &data_offset)) {
+ kfree(bios_connectors);
+ return false;
+ }
+
supported_devices =
(union atom_supported_devices *)(ctx->bios + data_offset);
@@ -851,6 +859,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
radeon_link_encoder_connector(dev);
+ kfree(bios_connectors);
return true;
}
@@ -1529,7 +1538,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].pcie_lanes =
power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
@@ -1596,7 +1606,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
@@ -1670,7 +1681,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
@@ -1746,9 +1758,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].misc2 = 0;
}
} else {
+ int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+ uint8_t fw_frev, fw_crev;
+ uint16_t fw_data_offset, vddc = 0;
+ union firmware_info *firmware_info;
+ ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
+
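+ /* pull the boot-up VDDC from the firmware info table so it can be patched into the default state below */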
+ if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL,
+ &fw_frev, &fw_crev, &fw_data_offset)) {
+ firmware_info =
+ (union firmware_info *)(mode_info->atom_context->bios +
+ fw_data_offset);
+ vddc = firmware_info->info_14.usBootUpVDDCVoltage;
+ }
+
/* add the i2c bus for thermal/fan chip */
/* no support for internal controller yet */
- ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
if (controller->ucType > 0) {
if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
(controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) ||
@@ -1808,10 +1833,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
/* skip invalid modes */
if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
continue;
- rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
- VOLTAGE_SW;
- rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
- clock_info->usVDDC;
+ /* voltage works differently on IGPs */
mode_index++;
} else if (ASIC_IS_DCE4(rdev)) {
struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info =
@@ -1895,6 +1917,16 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+ /* patch the table values with the default sclk/mclk from firmware info */
+ for (j = 0; j < mode_index; j++) {
+ rdev->pm.power_state[state_index].clock_info[j].mclk =
+ rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[j].sclk =
+ rdev->clock.default_sclk;
+ if (vddc)
+ rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
+ vddc;
+ }
}
state_index++;
}
@@ -1934,6 +1966,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
+ rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
}
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
@@ -1989,6 +2022,42 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+union set_voltage {
+ struct _SET_VOLTAGE_PS_ALLOCATION alloc;
+ struct _SET_VOLTAGE_PARAMETERS v1;
+ struct _SET_VOLTAGE_PARAMETERS_V2 v2;
+};
+
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level)
+{
+ union set_voltage args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+ u8 frev, crev, volt_index = level;
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
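+ /* v1 tables take a voltage index, while v2 tables take the voltage level directly */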
+ switch (crev) {
+ case 1:
+ args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+ args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
+ args.v1.ucVoltageIndex = volt_index;
+ break;
+ case 2:
+ args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+ args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
+ args.v2.usVoltageLevel = cpu_to_le16(level);
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+
+
void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 7b5e10d3e9c9..1bee2f9e24a5 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2026,6 +2026,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
break;
default:
+ ddc_i2c.valid = false;
break;
}
@@ -2339,6 +2340,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
if (RBIOS8(tv_info + 6) == 'T') {
if (radeon_apply_legacy_tv_quirks(dev)) {
hpd.hpd = RADEON_HPD_NONE;
+ ddc_i2c.valid = false;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
(dev,
@@ -2454,7 +2456,12 @@ default_mode:
rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
- rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ if ((state_index > 0) &&
+ (rdev->pm.power_state[0].clock_info[0].voltage.type == VOLTAGE_GPIO))
+ rdev->pm.power_state[state_index].clock_info[0].voltage =
+ rdev->pm.power_state[0].clock_info[0].voltage;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
rdev->pm.power_state[state_index].pcie_lanes = 16;
rdev->pm.power_state[state_index].flags = 0;
rdev->pm.default_power_state_index = state_index;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a20b612ffe75..f10faed21567 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -546,8 +546,10 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
/* don't suspend or resume card normally */
rdev->powered_down = false;
radeon_resume_kms(dev);
+ drm_kms_helper_poll_enable(dev);
} else {
printk(KERN_INFO "radeon: switched off\n");
+ drm_kms_helper_poll_disable(dev);
radeon_suspend_kms(dev, pmm);
/* don't suspend or resume card normally */
rdev->powered_down = true;
@@ -711,6 +713,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
struct radeon_device *rdev;
struct drm_crtc *crtc;
+ struct drm_connector *connector;
int r;
if (dev == NULL || dev->dev_private == NULL) {
@@ -723,6 +726,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
if (rdev->powered_down)
return 0;
+
+ /* turn off display hw */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ }
+
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
@@ -754,6 +763,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
/* evict remaining vram memory */
radeon_bo_evict_vram(rdev);
+ radeon_agp_suspend(rdev);
+
pci_save_state(dev->pdev);
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 1006549d1570..8154cdf796e4 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -284,8 +284,7 @@ static const char *connector_names[15] = {
"eDP",
};
-static const char *hpd_names[7] = {
- "NONE",
+static const char *hpd_names[6] = {
"HPD1",
"HPD2",
"HPD3",
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 902d1731a652..e166fe4d7c30 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -45,9 +45,10 @@
* - 2.2.0 - add r6xx/r7xx const buffer support
* - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
* - 2.4.0 - add crtc id query
+ * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 4
+#define KMS_DRIVER_MINOR 5
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index e192acfbf0cd..dc1634bb0c11 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -363,6 +363,7 @@ int radeon_fbdev_init(struct radeon_device *rdev)
{
struct radeon_fbdev *rfbdev;
int bpp_sel = 32;
+ int ret;
/* select 8 bpp console on RN50 or 16MB cards */
if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
@@ -376,9 +377,14 @@ int radeon_fbdev_init(struct radeon_device *rdev)
rdev->mode_info.rfbdev = rfbdev;
rfbdev->helper.funcs = &radeon_fb_helper_funcs;
- drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
- rdev->num_crtc,
- RADEONFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
+ rdev->num_crtc,
+ RADEONFB_CONN_LIMIT);
+ if (ret) {
+ kfree(rfbdev);
+ return ret;
+ }
+
drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 04068352ccd2..6a70c0dc7f92 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -118,7 +118,11 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
value = rdev->num_z_pipes;
break;
case RADEON_INFO_ACCEL_WORKING:
- value = rdev->accel_working;
+ /* xf86-video-ati 6.13.0 relies on this being false for evergreen */
+ if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
+ value = false;
+ else
+ value = rdev->accel_working;
break;
case RADEON_INFO_CRTC_FROM_ID:
for (i = 0, found = 0; i < rdev->num_crtc; i++) {
@@ -134,6 +138,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
+ case RADEON_INFO_ACCEL_WORKING2:
+ value = rdev->accel_working;
+ break;
default:
DRM_DEBUG("Invalid request %d\n", info->request);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 5a13b3eeef19..5b07b8848e09 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -1168,6 +1168,17 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
bool color = true;
+ struct drm_crtc *crtc;
+
+ /* find out if crtc2 is in use or if this encoder is using it */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ if ((radeon_crtc->crtc_id == 1) && crtc->enabled) {
+ if (encoder->crtc != crtc) {
+ return connector_status_disconnected;
+ }
+ }
+ }
if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO ||
connector->connector_type == DRM_MODE_CONNECTOR_Composite ||
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index a8d162c6f829..63f679a04b25 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -33,6 +33,14 @@
#define RADEON_WAIT_VBLANK_TIMEOUT 200
#define RADEON_WAIT_IDLE_TIMEOUT 200
+static const char *radeon_pm_state_type_name[5] = {
+ "Default",
+ "Powersave",
+ "Battery",
+ "Balanced",
+ "Performance",
+};
+
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
@@ -84,9 +92,9 @@ static void radeon_pm_update_profile(struct radeon_device *rdev)
rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
} else {
if (rdev->pm.active_crtc_count > 1)
- rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
+ rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
else
- rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
+ rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
}
break;
case PM_PROFILE_LOW:
@@ -95,6 +103,12 @@ static void radeon_pm_update_profile(struct radeon_device *rdev)
else
rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
break;
+ case PM_PROFILE_MID:
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
+ break;
case PM_PROFILE_HIGH:
if (rdev->pm.active_crtc_count > 1)
rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
@@ -127,15 +141,6 @@ static void radeon_unmap_vram_bos(struct radeon_device *rdev)
if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
ttm_bo_unmap_virtual(&bo->tbo);
}
-
- if (rdev->gart.table.vram.robj)
- ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo);
-
- if (rdev->stollen_vga_memory)
- ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo);
-
- if (rdev->r600_blit.shader_obj)
- ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo);
}
static void radeon_sync_with_vblank(struct radeon_device *rdev)
@@ -151,6 +156,7 @@ static void radeon_sync_with_vblank(struct radeon_device *rdev)
static void radeon_set_power_state(struct radeon_device *rdev)
{
u32 sclk, mclk;
+ bool misc_after = false;
if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
(rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
@@ -167,55 +173,47 @@ static void radeon_set_power_state(struct radeon_device *rdev)
if (mclk > rdev->clock.default_mclk)
mclk = rdev->clock.default_mclk;
- /* voltage, pcie lanes, etc.*/
- radeon_pm_misc(rdev);
+ /* upvolt before raising clocks, downvolt after lowering clocks */
+ if (sclk < rdev->pm.current_sclk)
+ misc_after = true;
- if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
- radeon_sync_with_vblank(rdev);
+ radeon_sync_with_vblank(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
if (!radeon_pm_in_vbl(rdev))
return;
+ }
- radeon_pm_prepare(rdev);
- /* set engine clock */
- if (sclk != rdev->pm.current_sclk) {
- radeon_pm_debug_check_in_vbl(rdev, false);
- radeon_set_engine_clock(rdev, sclk);
- radeon_pm_debug_check_in_vbl(rdev, true);
- rdev->pm.current_sclk = sclk;
- DRM_DEBUG("Setting: e: %d\n", sclk);
- }
+ radeon_pm_prepare(rdev);
- /* set memory clock */
- if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
- radeon_pm_debug_check_in_vbl(rdev, false);
- radeon_set_memory_clock(rdev, mclk);
- radeon_pm_debug_check_in_vbl(rdev, true);
- rdev->pm.current_mclk = mclk;
- DRM_DEBUG("Setting: m: %d\n", mclk);
- }
- radeon_pm_finish(rdev);
- } else {
- /* set engine clock */
- if (sclk != rdev->pm.current_sclk) {
- radeon_sync_with_vblank(rdev);
- radeon_pm_prepare(rdev);
- radeon_set_engine_clock(rdev, sclk);
- radeon_pm_finish(rdev);
- rdev->pm.current_sclk = sclk;
- DRM_DEBUG("Setting: e: %d\n", sclk);
- }
- /* set memory clock */
- if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
- radeon_sync_with_vblank(rdev);
- radeon_pm_prepare(rdev);
- radeon_set_memory_clock(rdev, mclk);
- radeon_pm_finish(rdev);
- rdev->pm.current_mclk = mclk;
- DRM_DEBUG("Setting: m: %d\n", mclk);
- }
+ if (!misc_after)
+ /* voltage, pcie lanes, etc.*/
+ radeon_pm_misc(rdev);
+
+ /* set engine clock */
+ if (sclk != rdev->pm.current_sclk) {
+ radeon_pm_debug_check_in_vbl(rdev, false);
+ radeon_set_engine_clock(rdev, sclk);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+ rdev->pm.current_sclk = sclk;
+ DRM_DEBUG("Setting: e: %d\n", sclk);
}
+ /* set memory clock */
+ if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+ radeon_pm_debug_check_in_vbl(rdev, false);
+ radeon_set_memory_clock(rdev, mclk);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+ rdev->pm.current_mclk = mclk;
+ DRM_DEBUG("Setting: m: %d\n", mclk);
+ }
+
+ if (misc_after)
+ /* voltage, pcie lanes, etc.*/
+ radeon_pm_misc(rdev);
+
+ radeon_pm_finish(rdev);
+
rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
} else
@@ -288,6 +286,42 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
mutex_unlock(&rdev->ddev->struct_mutex);
}
+static void radeon_pm_print_states(struct radeon_device *rdev)
+{
+ int i, j;
+ struct radeon_power_state *power_state;
+ struct radeon_pm_clock_info *clock_info;
+
+ DRM_DEBUG("%d Power State(s)\n", rdev->pm.num_power_states);
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ power_state = &rdev->pm.power_state[i];
+ DRM_DEBUG("State %d: %s\n", i,
+ radeon_pm_state_type_name[power_state->type]);
+ if (i == rdev->pm.default_power_state_index)
+ DRM_DEBUG("\tDefault");
+ if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
+ DRM_DEBUG("\t%d PCIE Lanes\n", power_state->pcie_lanes);
+ if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ DRM_DEBUG("\tSingle display only\n");
+ DRM_DEBUG("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
+ for (j = 0; j < power_state->num_clock_modes; j++) {
+ clock_info = &(power_state->clock_info[j]);
+ if (rdev->flags & RADEON_IS_IGP)
+ DRM_DEBUG("\t\t%d e: %d%s\n",
+ j,
+ clock_info->sclk * 10,
+ clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
+ else
+ DRM_DEBUG("\t\t%d e: %d\tm: %d\tv: %d%s\n",
+ j,
+ clock_info->sclk * 10,
+ clock_info->mclk * 10,
+ clock_info->voltage.voltage,
+ clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
+ }
+ }
+}
+
static ssize_t radeon_get_pm_profile(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -318,6 +352,8 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
rdev->pm.profile = PM_PROFILE_AUTO;
else if (strncmp("low", buf, strlen("low")) == 0)
rdev->pm.profile = PM_PROFILE_LOW;
+ else if (strncmp("mid", buf, strlen("mid")) == 0)
+ rdev->pm.profile = PM_PROFILE_MID;
else if (strncmp("high", buf, strlen("high")) == 0)
rdev->pm.profile = PM_PROFILE_HIGH;
else {
@@ -384,15 +420,19 @@ void radeon_pm_suspend(struct radeon_device *rdev)
{
mutex_lock(&rdev->pm.mutex);
cancel_delayed_work(&rdev->pm.dynpm_idle_work);
- rdev->pm.current_power_state_index = -1;
- rdev->pm.current_clock_mode_index = -1;
- rdev->pm.current_sclk = 0;
- rdev->pm.current_mclk = 0;
mutex_unlock(&rdev->pm.mutex);
}
void radeon_pm_resume(struct radeon_device *rdev)
{
+ /* asic init will reset the default power state */
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
+ rdev->pm.current_sclk = rdev->clock.default_sclk;
+ rdev->pm.current_mclk = rdev->clock.default_mclk;
+ rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ mutex_unlock(&rdev->pm.mutex);
radeon_pm_compute_clocks(rdev);
}
@@ -401,32 +441,24 @@ int radeon_pm_init(struct radeon_device *rdev)
int ret;
/* default to profile method */
rdev->pm.pm_method = PM_METHOD_PROFILE;
+ rdev->pm.profile = PM_PROFILE_DEFAULT;
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
rdev->pm.dynpm_can_upclock = true;
rdev->pm.dynpm_can_downclock = true;
- rdev->pm.current_sclk = 0;
- rdev->pm.current_mclk = 0;
+ rdev->pm.current_sclk = rdev->clock.default_sclk;
+ rdev->pm.current_mclk = rdev->clock.default_mclk;
if (rdev->bios) {
if (rdev->is_atom_bios)
radeon_atombios_get_power_modes(rdev);
else
radeon_combios_get_power_modes(rdev);
+ radeon_pm_print_states(rdev);
radeon_pm_init_profile(rdev);
- rdev->pm.current_power_state_index = -1;
- rdev->pm.current_clock_mode_index = -1;
}
if (rdev->pm.num_power_states > 1) {
- if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
- mutex_lock(&rdev->pm.mutex);
- rdev->pm.profile = PM_PROFILE_DEFAULT;
- radeon_pm_update_profile(rdev);
- radeon_pm_set_clocks(rdev);
- mutex_unlock(&rdev->pm.mutex);
- }
-
/* where's the best place to put these? */
ret = device_create_file(rdev->dev, &dev_attr_power_profile);
if (ret)
@@ -712,6 +744,8 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
if (rdev->asic->get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+ if (rdev->pm.current_vddc)
+ seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
if (rdev->asic->get_pcie_lanes)
seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index cc5316dcf580..b3ba44c0a818 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -900,9 +900,10 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
flags |= RADEON_FRONT;
}
if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
- if (!dev_priv->have_z_offset)
+ if (!dev_priv->have_z_offset) {
printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
- flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+ flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+ }
}
if (flags & (RADEON_FRONT | RADEON_BACK)) {
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
new file mode 100644
index 000000000000..b5c757f68d3c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -0,0 +1,611 @@
+evergreen 0x9400
+0x00008040 WAIT_UNTIL
+0x00008044 WAIT_UNTIL_POLL_CNTL
+0x00008048 WAIT_UNTIL_POLL_MASK
+0x0000804c WAIT_UNTIL_POLL_REFDATA
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x000088C4 VGT_CACHE_INVALIDATION
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00008958 VGT_PRIMITIVE_TYPE
+0x0000895C VGT_INDEX_TYPE
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00008990 VGT_COMPUTE_DIM_X
+0x00008994 VGT_COMPUTE_DIM_Y
+0x00008998 VGT_COMPUTE_DIM_Z
+0x0000899C VGT_COMPUTE_START_X
+0x000089A0 VGT_COMPUTE_START_Y
+0x000089A4 VGT_COMPUTE_START_Z
+0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
+0x00008A14 PA_CL_ENHANCE
+0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00008BF0 PA_SC_ENHANCE
+0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008C00 SQ_CONFIG
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C08 SQ_GPR_RESOURCE_MGMT_2
+0x00008C0C SQ_GPR_RESOURCE_MGMT_3
+0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
+0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
+0x00008C18 SQ_THREAD_RESOURCE_MGMT
+0x00008C1C SQ_THREAD_RESOURCE_MGMT_2
+0x00008C20 SQ_STACK_RESOURCE_MGMT_1
+0x00008C24 SQ_STACK_RESOURCE_MGMT_2
+0x00008C28 SQ_STACK_RESOURCE_MGMT_3
+0x00008DF8 SQ_CONST_MEM_BASE
+0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x00009700 VC_CNTL
+0x00009714 VC_ENHANCE
+0x00009830 DB_DEBUG
+0x00009834 DB_DEBUG2
+0x00009838 DB_DEBUG3
+0x0000983C DB_DEBUG4
+0x00009854 DB_WATERMARKS
+0x0000A400 TD_PS_BORDER_COLOR_INDEX
+0x0000A404 TD_PS_BORDER_COLOR_RED
+0x0000A408 TD_PS_BORDER_COLOR_GREEN
+0x0000A40C TD_PS_BORDER_COLOR_BLUE
+0x0000A410 TD_PS_BORDER_COLOR_ALPHA
+0x0000A414 TD_VS_BORDER_COLOR_INDEX
+0x0000A418 TD_VS_BORDER_COLOR_RED
+0x0000A41C TD_VS_BORDER_COLOR_GREEN
+0x0000A420 TD_VS_BORDER_COLOR_BLUE
+0x0000A424 TD_VS_BORDER_COLOR_ALPHA
+0x0000A428 TD_GS_BORDER_COLOR_INDEX
+0x0000A42C TD_GS_BORDER_COLOR_RED
+0x0000A430 TD_GS_BORDER_COLOR_GREEN
+0x0000A434 TD_GS_BORDER_COLOR_BLUE
+0x0000A438 TD_GS_BORDER_COLOR_ALPHA
+0x0000A43C TD_HS_BORDER_COLOR_INDEX
+0x0000A440 TD_HS_BORDER_COLOR_RED
+0x0000A444 TD_HS_BORDER_COLOR_GREEN
+0x0000A448 TD_HS_BORDER_COLOR_BLUE
+0x0000A44C TD_HS_BORDER_COLOR_ALPHA
+0x0000A450 TD_LS_BORDER_COLOR_INDEX
+0x0000A454 TD_LS_BORDER_COLOR_RED
+0x0000A458 TD_LS_BORDER_COLOR_GREEN
+0x0000A45C TD_LS_BORDER_COLOR_BLUE
+0x0000A460 TD_LS_BORDER_COLOR_ALPHA
+0x0000A464 TD_CS_BORDER_COLOR_INDEX
+0x0000A468 TD_CS_BORDER_COLOR_RED
+0x0000A46C TD_CS_BORDER_COLOR_GREEN
+0x0000A470 TD_CS_BORDER_COLOR_BLUE
+0x0000A474 TD_CS_BORDER_COLOR_ALPHA
+0x00028000 DB_RENDER_CONTROL
+0x00028004 DB_COUNT_CONTROL
+0x0002800C DB_RENDER_OVERRIDE
+0x00028010 DB_RENDER_OVERRIDE2
+0x00028028 DB_STENCIL_CLEAR
+0x0002802C DB_DEPTH_CLEAR
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x0002805C DB_DEPTH_SLICE
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028230 PA_SC_EDGERULE
+0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x00028350 SX_MISC
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028408 VGT_INDX_OFFSET
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028414 CB_BLEND_RED
+0x00028418 CB_BLEND_GREEN
+0x0002841C CB_BLEND_BLUE
+0x00028420 CB_BLEND_ALPHA
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028438 SX_ALPHA_REF
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x000285BC PA_CL_UCP_0_X
+0x000285C0 PA_CL_UCP_0_Y
+0x000285C4 PA_CL_UCP_0_Z
+0x000285C8 PA_CL_UCP_0_W
+0x000285CC PA_CL_UCP_1_X
+0x000285D0 PA_CL_UCP_1_Y
+0x000285D4 PA_CL_UCP_1_Z
+0x000285D8 PA_CL_UCP_1_W
+0x000285DC PA_CL_UCP_2_X
+0x000285E0 PA_CL_UCP_2_Y
+0x000285E4 PA_CL_UCP_2_Z
+0x000285E8 PA_CL_UCP_2_W
+0x000285EC PA_CL_UCP_3_X
+0x000285F0 PA_CL_UCP_3_Y
+0x000285F4 PA_CL_UCP_3_Z
+0x000285F8 PA_CL_UCP_3_W
+0x000285FC PA_CL_UCP_4_X
+0x00028600 PA_CL_UCP_4_Y
+0x00028604 PA_CL_UCP_4_Z
+0x00028608 PA_CL_UCP_4_W
+0x0002860C PA_CL_UCP_5_X
+0x00028610 PA_CL_UCP_5_Y
+0x00028614 PA_CL_UCP_5_Z
+0x00028618 PA_CL_UCP_5_W
+0x0002861C SPI_VS_OUT_ID_0
+0x00028620 SPI_VS_OUT_ID_1
+0x00028624 SPI_VS_OUT_ID_2
+0x00028628 SPI_VS_OUT_ID_3
+0x0002862C SPI_VS_OUT_ID_4
+0x00028630 SPI_VS_OUT_ID_5
+0x00028634 SPI_VS_OUT_ID_6
+0x00028638 SPI_VS_OUT_ID_7
+0x0002863C SPI_VS_OUT_ID_8
+0x00028640 SPI_VS_OUT_ID_9
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286C4 SPI_VS_OUT_CONFIG
+0x000286C8 SPI_THREAD_GROUPING
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286D4 SPI_INTERP_CONTROL_0
+0x000286D8 SPI_INPUT_Z
+0x000286DC SPI_FOG_CNTL
+0x000286E0 SPI_BARYC_CNTL
+0x000286E4 SPI_PS_IN_CONTROL_2
+0x000286E8 SPI_COMPUTE_INPUT_CNTL
+0x000286EC SPI_COMPUTE_NUM_THREAD_X
+0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
+0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
+0x000286F8 GDS_ADDR_SIZE
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x000287CC CS_COPY_STATE
+0x000287D0 GFX_COPY_STATE
+0x000287D4 PA_CL_POINT_X_RAD
+0x000287D8 PA_CL_POINT_Y_RAD
+0x000287DC PA_CL_POINT_SIZE
+0x000287E0 PA_CL_POINT_CULL_RAD
+0x00028808 CB_COLOR_CONTROL
+0x0002880C DB_SHADER_CONTROL
+0x00028810 PA_CL_CLIP_CNTL
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028820 PA_CL_NANINF_CNTL
+0x00028824 PA_SU_LINE_STIPPLE_CNTL
+0x00028828 PA_SU_LINE_STIPPLE_SCALE
+0x0002882C PA_SU_PRIM_FILTER_CNTL
+0x00028838 SQ_DYN_GPR_RESOURCE_LIMIT_1
+0x00028844 SQ_PGM_RESOURCES_PS
+0x00028848 SQ_PGM_RESOURCES_2_PS
+0x0002884C SQ_PGM_EXPORTS_PS
+0x0002885C SQ_PGM_RESOURCES_VS
+0x00028860 SQ_PGM_RESOURCES_2_VS
+0x00028878 SQ_PGM_RESOURCES_GS
+0x0002887C SQ_PGM_RESOURCES_2_GS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x00028894 SQ_PGM_RESOURCES_2_ES
+0x000288A8 SQ_PGM_RESOURCES_FS
+0x000288BC SQ_PGM_RESOURCES_HS
+0x000288C0 SQ_PGM_RESOURCES_2_HS
+0x000288D0 SQ_PGM_RESOURCES_LS
+0x000288D4 SQ_PGM_RESOURCES_2_LS
+0x000288E8 SQ_LDS_ALLOC
+0x000288EC SQ_LDS_ALLOC_PS
+0x000288F0 SQ_VTX_SEMANTIC_CLEAR
+0x00028A00 PA_SU_POINT_SIZE
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A2C VGT_GROUP_DECR
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A48 PA_SC_MODE_CNTL_0
+0x00028A4C PA_SC_MODE_CNTL_1
+0x00028A50 VGT_ENHANCE
+0x00028A54 VGT_GS_PER_ES
+0x00028A58 VGT_ES_PER_GS
+0x00028A5C VGT_GS_PER_VS
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x00028ABC DB_HTILE_SURFACE
+0x00028AC0 DB_SRESULTS_COMPARE_STATE0
+0x00028AC4 DB_SRESULTS_COMPARE_STATE1
+0x00028AC8 DB_PRELOAD_CONTROL
+0x00028B38 VGT_GS_MAX_VERT_OUT
+0x00028B54 VGT_SHADER_STAGES_EN
+0x00028B58 VGT_LS_HS_CONFIG
+0x00028B5C VGT_LS_SIZE
+0x00028B60 VGT_HS_SIZE
+0x00028B64 VGT_LS_HS_ALLOC
+0x00028B68 VGT_HS_PATCH_CONST
+0x00028B6C VGT_TF_PARAM
+0x00028B70 DB_ALPHA_TO_MASK
+0x00028B74 VGT_DISPATCH_INITIATOR
+0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028B7C PA_SU_POLY_OFFSET_CLAMP
+0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028B74 VGT_GS_INSTANCE_CNT
+0x00028C00 PA_SC_LINE_CNTL
+0x00028C08 PA_SU_VTX_CNTL
+0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
+0x00028C10 PA_CL_GB_VERT_DISC_ADJ
+0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_0
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_1
+0x00028C24 PA_SC_AA_SAMPLE_LOCS_2
+0x00028C28 PA_SC_AA_SAMPLE_LOCS_3
+0x00028C2C PA_SC_AA_SAMPLE_LOCS_4
+0x00028C30 PA_SC_AA_SAMPLE_LOCS_5
+0x00028C34 PA_SC_AA_SAMPLE_LOCS_6
+0x00028C38 PA_SC_AA_SAMPLE_LOCS_7
+0x00028C3C PA_SC_AA_MASK
+0x00028C8C CB_COLOR0_CLEAR_WORD0
+0x00028C90 CB_COLOR0_CLEAR_WORD1
+0x00028C94 CB_COLOR0_CLEAR_WORD2
+0x00028C98 CB_COLOR0_CLEAR_WORD3
+0x00028CC8 CB_COLOR1_CLEAR_WORD0
+0x00028CCC CB_COLOR1_CLEAR_WORD1
+0x00028CD0 CB_COLOR1_CLEAR_WORD2
+0x00028CD4 CB_COLOR1_CLEAR_WORD3
+0x00028D04 CB_COLOR2_CLEAR_WORD0
+0x00028D08 CB_COLOR2_CLEAR_WORD1
+0x00028D0C CB_COLOR2_CLEAR_WORD2
+0x00028D10 CB_COLOR2_CLEAR_WORD3
+0x00028D40 CB_COLOR3_CLEAR_WORD0
+0x00028D44 CB_COLOR3_CLEAR_WORD1
+0x00028D48 CB_COLOR3_CLEAR_WORD2
+0x00028D4C CB_COLOR3_CLEAR_WORD3
+0x00028D7C CB_COLOR4_CLEAR_WORD0
+0x00028D80 CB_COLOR4_CLEAR_WORD1
+0x00028D84 CB_COLOR4_CLEAR_WORD2
+0x00028D88 CB_COLOR4_CLEAR_WORD3
+0x00028DB8 CB_COLOR5_CLEAR_WORD0
+0x00028DBC CB_COLOR5_CLEAR_WORD1
+0x00028DC0 CB_COLOR5_CLEAR_WORD2
+0x00028DC4 CB_COLOR5_CLEAR_WORD3
+0x00028DF4 CB_COLOR6_CLEAR_WORD0
+0x00028DF8 CB_COLOR6_CLEAR_WORD1
+0x00028DFC CB_COLOR6_CLEAR_WORD2
+0x00028E00 CB_COLOR6_CLEAR_WORD3
+0x00028E30 CB_COLOR7_CLEAR_WORD0
+0x00028E34 CB_COLOR7_CLEAR_WORD1
+0x00028E38 CB_COLOR7_CLEAR_WORD2
+0x00028E3C CB_COLOR7_CLEAR_WORD3
+0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
+0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
+0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
+0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
+0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
+0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
+0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
+0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
+0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
+0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
+0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
+0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
+0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
+0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
+0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
+0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
+0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
+0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
+0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
+0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
+0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
+0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
+0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
+0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
+0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
+0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
+0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
+0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
+0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
+0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
+0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
+0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
+0x0003CFF0 SQ_VTX_BASE_VTX_LOC
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x0003FF00 SQ_TEX_SAMPLER_CLEAR
+0x0003FF04 SQ_TEX_RESOURCE_CLEAR
+0x0003FF08 SQ_LOOP_BOOL_CLEAR
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 79887cac5b54..7bb4c3e52f3b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -74,7 +74,8 @@ void rs600_pm_misc(struct radeon_device *rdev)
if (voltage->delay)
udelay(voltage->delay);
}
- }
+ } else if (voltage->type == VOLTAGE_VDDC)
+ radeon_atom_set_voltage(rdev, voltage->vddc_id);
dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 253f24aec031..cec536c222c5 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -44,7 +44,18 @@ void rv770_fini(struct radeon_device *rdev);
void rv770_pm_misc(struct radeon_device *rdev)
{
-
+ int req_ps_idx = rdev->pm.requested_power_state_index;
+ int req_cm_idx = rdev->pm.requested_clock_mode_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+ if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+ if (voltage->voltage != rdev->pm.current_vddc) {
+ radeon_atom_set_voltage(rdev, voltage->voltage);
+ rdev->pm.current_vddc = voltage->voltage;
+ DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+ }
+ }
}
/*
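
A rough user-space model (not part of the patch) of what the rv770_pm_misc() hunk above does: a software-controlled voltage is written to the hardware only when it differs from the cached current value, so repeated power-state changes do not issue redundant atom calls. All names below are invented for the sketch.

/*
 * Simplified illustration of the "only program VDDC when it changed"
 * pattern added above. Not driver code; names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct pm_state {
	uint16_t current_vddc;	/* last voltage actually programmed, in mV */
};

static void set_voltage_hw(uint16_t mv)
{
	/* Stand-in for the real hardware/atom call. */
	printf("programming VDDC to %u mV\n", mv);
}

static void apply_requested_voltage(struct pm_state *pm, uint16_t requested)
{
	/* Skip zero (unspecified) requests and unchanged values. */
	if (requested && requested != pm->current_vddc) {
		set_voltage_hw(requested);
		pm->current_vddc = requested;
	}
}

int main(void)
{
	struct pm_state pm = { .current_vddc = 0 };

	apply_requested_voltage(&pm, 1100);	/* programs the regulator */
	apply_requested_voltage(&pm, 1100);	/* skipped: unchanged */
	apply_requested_voltage(&pm, 1000);	/* programs the new value */
	return 0;
}
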
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 0d9a42c2394f..ef910694bd63 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -77,7 +77,7 @@ struct ttm_page_pool {
/**
* Limits for the pool. They are handled without locks because only place where
* they may change is in sysfs store. They won't have immediate effect anyway
- * so forcing serialiazation to access them is pointless.
+ * so forcing serialization to access them is pointless.
*/
struct ttm_pool_opts {
@@ -165,16 +165,18 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
m->options.small = val;
else if (attr == &ttm_page_pool_alloc_size) {
if (val > NUM_PAGES_TO_ALLOC*8) {
- printk(KERN_ERR "[ttm] Setting allocation size to %lu "
- "is not allowed. Recomended size is "
- "%lu\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ printk(KERN_ERR TTM_PFX
+ "Setting allocation size to %lu "
+ "is not allowed. Recommended size is "
+ "%lu\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
return size;
} else if (val > NUM_PAGES_TO_ALLOC) {
- printk(KERN_WARNING "[ttm] Setting allocation size to "
- "larger than %lu is not recomended.\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ printk(KERN_WARNING TTM_PFX
+ "Setting allocation size to "
+ "larger than %lu is not recommended.\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
}
m->options.alloc_size = val;
}
@@ -277,7 +279,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
{
unsigned i;
if (set_pages_array_wb(pages, npages))
- printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
+ printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
npages);
for (i = 0; i < npages; ++i)
__free_page(pages[i]);
@@ -313,7 +315,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
if (!pages_to_free) {
- printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
+ printk(KERN_ERR TTM_PFX
+ "Failed to allocate memory for pool free operation.\n");
return 0;
}
@@ -390,7 +393,7 @@ static int ttm_pool_get_num_unused_pages(void)
}
/**
- * Calback for mm to request pool to reduce number of page held.
+ * Callback for mm to request pool to reduce number of page held.
*/
static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
{
@@ -433,14 +436,16 @@ static int ttm_set_pages_caching(struct page **pages,
case tt_uncached:
r = set_pages_array_uc(pages, cpages);
if (r)
- printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
- cpages);
+ printk(KERN_ERR TTM_PFX
+ "Failed to set %d pages to uc!\n",
+ cpages);
break;
case tt_wc:
r = set_pages_array_wc(pages, cpages);
if (r)
- printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
- cpages);
+ printk(KERN_ERR TTM_PFX
+ "Failed to set %d pages to wc!\n",
+ cpages);
break;
default:
break;
@@ -458,7 +463,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
struct page **failed_pages, unsigned cpages)
{
unsigned i;
- /* Failed pages has to be reed */
+ /* Failed pages have to be freed */
for (i = 0; i < cpages; ++i) {
list_del(&failed_pages[i]->lru);
__free_page(failed_pages[i]);
@@ -485,7 +490,8 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
- printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
+ printk(KERN_ERR TTM_PFX
+ "Unable to allocate table for new pages.");
return -ENOMEM;
}
@@ -493,12 +499,13 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
p = alloc_page(gfp_flags);
if (!p) {
- printk(KERN_ERR "[ttm] unable to get page %u\n", i);
+ printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
/* store already allocated pages in the pool after
* setting the caching state */
if (cpages) {
- r = ttm_set_pages_caching(caching_array, cstate, cpages);
+ r = ttm_set_pages_caching(caching_array,
+ cstate, cpages);
if (r)
ttm_handle_caching_state_failure(pages,
ttm_flags, cstate,
@@ -590,7 +597,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
++pool->nrefills;
pool->npages += alloc_size;
} else {
- printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
+ printk(KERN_ERR TTM_PFX
+ "Failed to fill pool (%p).", pool);
/* If we have any pages left put them to the pool. */
list_for_each_entry(p, &pool->list, lru) {
++cpages;
@@ -671,13 +679,14 @@ int ttm_get_pages(struct list_head *pages, int flags,
if (flags & TTM_PAGE_FLAG_DMA32)
gfp_flags |= GFP_DMA32;
else
- gfp_flags |= __GFP_HIGHMEM;
+ gfp_flags |= GFP_HIGHUSER;
for (r = 0; r < count; ++r) {
p = alloc_page(gfp_flags);
if (!p) {
- printk(KERN_ERR "[ttm] unable to allocate page.");
+ printk(KERN_ERR TTM_PFX
+ "Unable to allocate page.");
return -ENOMEM;
}
@@ -709,8 +718,9 @@ int ttm_get_pages(struct list_head *pages, int flags,
if (r) {
/* If there is any pages in the list put them back to
* the pool. */
- printk(KERN_ERR "[ttm] Failed to allocate extra pages "
- "for large request.");
+ printk(KERN_ERR TTM_PFX
+ "Failed to allocate extra pages "
+ "for large request.");
ttm_put_pages(pages, 0, flags, cstate);
return r;
}
@@ -778,7 +788,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
return 0;
- printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
+ printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
@@ -813,7 +823,7 @@ void ttm_page_alloc_fini()
if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
return;
- printk(KERN_INFO "[ttm] Finilizing pool allocator.\n");
+ printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
ttm_pool_mm_shrink_fini(&_manager);
for (i = 0; i < NUM_POOLS; ++i)
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 1a3cb6816d1c..4505e17df3f5 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
- vmwgfx_overlay.o
+ vmwgfx_overlay.o vmwgfx_fence.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0c9c0811f42d..b793c8c9acb3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -88,6 +88,9 @@
#define DRM_IOCTL_VMW_FENCE_WAIT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
struct drm_vmw_fence_wait_arg)
+#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
+ struct drm_vmw_update_layout_arg)
/**
@@ -135,7 +138,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
- DRM_AUTH | DRM_UNLOCKED)
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
+ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
};
static struct pci_device_id vmw_pci_id_list[] = {
@@ -318,6 +323,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_err3;
}
+ /* Need mmio memory to check for fifo pitchlock cap. */
+ if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+ !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
+ !vmw_fifo_have_pitchlock(dev_priv)) {
+ ret = -ENOSYS;
+ DRM_ERROR("Hardware has no pitchlock\n");
+ goto out_err4;
+ }
+
dev_priv->tdev = ttm_object_device_init
(dev_priv->mem_global_ref.object, 12);
@@ -399,8 +413,6 @@ static int vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
-
unregister_pm_notifier(&dev_priv->pm_nb);
vmw_fb_close(dev_priv);
@@ -546,7 +558,6 @@ static int vmw_master_create(struct drm_device *dev,
{
struct vmw_master *vmaster;
- DRM_INFO("Master create.\n");
vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
if (unlikely(vmaster == NULL))
return -ENOMEM;
@@ -563,7 +574,6 @@ static void vmw_master_destroy(struct drm_device *dev,
{
struct vmw_master *vmaster = vmw_master(master);
- DRM_INFO("Master destroy.\n");
master->driver_priv = NULL;
kfree(vmaster);
}
@@ -579,8 +589,6 @@ static int vmw_master_set(struct drm_device *dev,
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret = 0;
- DRM_INFO("Master set.\n");
-
if (active) {
BUG_ON(active != &dev_priv->fbdev_master);
ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -622,8 +630,6 @@ static void vmw_master_drop(struct drm_device *dev,
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
- DRM_INFO("Master drop.\n");
-
/**
* Make sure the master doesn't disappear while we have
* it locked.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 356dc935ec13..eaad52095339 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -41,12 +41,13 @@
#define VMWGFX_DRIVER_DATE "20100209"
#define VMWGFX_DRIVER_MAJOR 1
-#define VMWGFX_DRIVER_MINOR 0
+#define VMWGFX_DRIVER_MINOR 2
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_GMRS 2048
+#define VMWGFX_MAX_DISPLAYS 16
struct vmw_fpriv {
struct drm_master *locked_master;
@@ -102,6 +103,13 @@ struct vmw_surface {
struct vmw_cursor_snooper snooper;
};
+struct vmw_fence_queue {
+ struct list_head head;
+ struct timespec lag;
+ struct timespec lag_time;
+ spinlock_t lock;
+};
+
struct vmw_fifo_state {
unsigned long reserved_size;
__le32 *dynamic_buffer;
@@ -115,6 +123,7 @@ struct vmw_fifo_state {
uint32_t capabilities;
struct mutex fifo_mutex;
struct rw_semaphore rwsem;
+ struct vmw_fence_queue fence_queue;
};
struct vmw_relocation {
@@ -144,6 +153,14 @@ struct vmw_master {
struct ttm_lock lock;
};
+struct vmw_vga_topology_state {
+ uint32_t width;
+ uint32_t height;
+ uint32_t primary;
+ uint32_t pos_x;
+ uint32_t pos_y;
+};
+
struct vmw_private {
struct ttm_bo_device bdev;
struct ttm_bo_global_ref bo_global_ref;
@@ -171,14 +188,19 @@ struct vmw_private {
* VGA registers.
*/
+ struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
uint32_t vga_width;
uint32_t vga_height;
uint32_t vga_depth;
uint32_t vga_bpp;
uint32_t vga_pseudo;
uint32_t vga_red_mask;
- uint32_t vga_blue_mask;
uint32_t vga_green_mask;
+ uint32_t vga_blue_mask;
+ uint32_t vga_bpl;
+ uint32_t vga_pitchlock;
+
+ uint32_t num_displays;
/*
* Framebuffer info.
@@ -393,6 +415,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
+extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
/**
* TTM glue - vmwgfx_ttm_glue.c
@@ -441,6 +464,23 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
uint32_t sequence,
bool interruptible,
unsigned long timeout);
+extern void vmw_update_sequence(struct vmw_private *dev_priv,
+ struct vmw_fifo_state *fifo_state);
+
+
+/**
+ * Rudimentary fence objects currently used only for throttling -
+ * vmwgfx_fence.c
+ */
+
+extern void vmw_fence_queue_init(struct vmw_fence_queue *queue);
+extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue);
+extern int vmw_fence_push(struct vmw_fence_queue *queue,
+ uint32_t sequence);
+extern int vmw_fence_pull(struct vmw_fence_queue *queue,
+ uint32_t signaled_sequence);
+extern int vmw_wait_lag(struct vmw_private *dev_priv,
+ struct vmw_fence_queue *queue, uint32_t us);
/**
* Kernel framebuffer - vmwgfx_fb.c
@@ -466,6 +506,11 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
SVGA3dCmdHeader *header);
+void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+ unsigned width, unsigned height, unsigned pitch,
+ unsigned bbp, unsigned depth);
+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/**
* Overlay control - vmwgfx_overlay.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dbd36b8910cf..8e396850513c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -644,6 +644,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
ret = copy_from_user(cmd, user_cmd, arg->command_size);
if (unlikely(ret != 0)) {
+ ret = -EFAULT;
DRM_ERROR("Failed copying commands.\n");
goto out_commit;
}
@@ -669,6 +670,15 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
goto out_err;
vmw_apply_relocations(sw_context);
+
+ if (arg->throttle_us) {
+ ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
+ arg->throttle_us);
+
+ if (unlikely(ret != 0))
+ goto out_err;
+ }
+
vmw_fifo_commit(dev_priv, arg->command_size);
ret = vmw_fifo_send_fence(dev_priv, &sequence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 7421aaad8d09..b0866f04ec76 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -132,16 +132,14 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
- /* without multimon its hard to resize */
- if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
- (var->xres != par->max_width ||
- var->yres != par->max_height)) {
- DRM_ERROR("Tried to resize, but we don't have multimon\n");
+ if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+ (var->xoffset != 0 || var->yoffset != 0)) {
+ DRM_ERROR("Can not handle panning without display topology\n");
return -EINVAL;
}
- if (var->xres > par->max_width ||
- var->yres > par->max_height) {
+ if ((var->xoffset + var->xres) > par->max_width ||
+ (var->yoffset + var->yres) > par->max_height) {
DRM_ERROR("Requested geom can not fit in framebuffer\n");
return -EINVAL;
}
@@ -154,27 +152,11 @@ static int vmw_fb_set_par(struct fb_info *info)
struct vmw_fb_par *par = info->par;
struct vmw_private *vmw_priv = par->vmw_priv;
- if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
- vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-
- vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
- vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
- vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
- vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
- vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
- vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
- vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
-
+ vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+ info->fix.line_length,
+ par->bpp, par->depth);
+ if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
/* TODO check if pitch and offset changes */
-
vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
@@ -183,13 +165,13 @@ static int vmw_fb_set_par(struct fb_info *info)
vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
- } else {
- vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
- vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
-
- /* TODO check if pitch and offset changes */
}
+ /* This check is really helpful: if it fails, the user
+ * probably cannot see anything on the screen.
+ */
+ WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
+
return 0;
}
@@ -416,48 +398,23 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
int ret;
+ /* XXX These shouldn't be hardcoded. */
initial_width = 800;
initial_height = 600;
fb_bbp = 32;
fb_depth = 24;
- if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
- fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
- fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
- } else {
- fb_width = min(vmw_priv->fb_max_width, initial_width);
- fb_height = min(vmw_priv->fb_max_height, initial_height);
- }
+ /* XXX Neither should these be hardcoded. */
+ fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
+ fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
initial_width = min(fb_width, initial_width);
initial_height = min(fb_height, initial_height);
- vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
- vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp);
- vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
- vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
- vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
- vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
-
- fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
+ fb_pitch = fb_width * fb_bbp / 8;
+ fb_size = fb_pitch * fb_height;
fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
- fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
-
- DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
- DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
- DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
- DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
- DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
- DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
- DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
- DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
- DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
- DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
- DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
- DRM_DEBUG("fb_pitch %u\n", fb_pitch);
- DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);
info = framebuffer_alloc(sizeof(*par), device);
if (!info)
@@ -659,6 +616,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
goto err_unlock;
ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
+
+ /* Could probably BUG_ON() here instead of just warning. */
+ WARN_ON(bo->offset != 0);
+
ttm_bo_unreserve(bo);
err_unlock:
ttm_write_unlock(&vmw_priv->active_master->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
new file mode 100644
index 000000000000..61eacc1b5ca3
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -0,0 +1,173 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "vmwgfx_drv.h"
+
+struct vmw_fence {
+ struct list_head head;
+ uint32_t sequence;
+ struct timespec submitted;
+};
+
+void vmw_fence_queue_init(struct vmw_fence_queue *queue)
+{
+ INIT_LIST_HEAD(&queue->head);
+ queue->lag = ns_to_timespec(0);
+ getrawmonotonic(&queue->lag_time);
+ spin_lock_init(&queue->lock);
+}
+
+void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
+{
+ struct vmw_fence *fence, *next;
+
+ spin_lock(&queue->lock);
+ list_for_each_entry_safe(fence, next, &queue->head, head) {
+ kfree(fence);
+ }
+ spin_unlock(&queue->lock);
+}
+
+int vmw_fence_push(struct vmw_fence_queue *queue,
+ uint32_t sequence)
+{
+ struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+
+ if (unlikely(!fence))
+ return -ENOMEM;
+
+ fence->sequence = sequence;
+ getrawmonotonic(&fence->submitted);
+ spin_lock(&queue->lock);
+ list_add_tail(&fence->head, &queue->head);
+ spin_unlock(&queue->lock);
+
+ return 0;
+}
+
+int vmw_fence_pull(struct vmw_fence_queue *queue,
+ uint32_t signaled_sequence)
+{
+ struct vmw_fence *fence, *next;
+ struct timespec now;
+ bool updated = false;
+
+ spin_lock(&queue->lock);
+ getrawmonotonic(&now);
+
+ if (list_empty(&queue->head)) {
+ queue->lag = ns_to_timespec(0);
+ queue->lag_time = now;
+ updated = true;
+ goto out_unlock;
+ }
+
+ list_for_each_entry_safe(fence, next, &queue->head, head) {
+ if (signaled_sequence - fence->sequence > (1 << 30))
+ continue;
+
+ queue->lag = timespec_sub(now, fence->submitted);
+ queue->lag_time = now;
+ updated = true;
+ list_del(&fence->head);
+ kfree(fence);
+ }
+
+out_unlock:
+ spin_unlock(&queue->lock);
+
+ return (updated) ? 0 : -EBUSY;
+}
+
+static struct timespec vmw_timespec_add(struct timespec t1,
+ struct timespec t2)
+{
+ t1.tv_sec += t2.tv_sec;
+ t1.tv_nsec += t2.tv_nsec;
+ if (t1.tv_nsec >= 1000000000L) {
+ t1.tv_sec += 1;
+ t1.tv_nsec -= 1000000000L;
+ }
+
+ return t1;
+}
+
+static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
+{
+ struct timespec now;
+
+ spin_lock(&queue->lock);
+ getrawmonotonic(&now);
+ queue->lag = vmw_timespec_add(queue->lag,
+ timespec_sub(now, queue->lag_time));
+ queue->lag_time = now;
+ spin_unlock(&queue->lock);
+ return queue->lag;
+}
+
+
+static bool vmw_lag_lt(struct vmw_fence_queue *queue,
+ uint32_t us)
+{
+ struct timespec lag, cond;
+
+ cond = ns_to_timespec((s64) us * 1000);
+ lag = vmw_fifo_lag(queue);
+ return (timespec_compare(&lag, &cond) < 1);
+}
+
+int vmw_wait_lag(struct vmw_private *dev_priv,
+ struct vmw_fence_queue *queue, uint32_t us)
+{
+ struct vmw_fence *fence;
+ uint32_t sequence;
+ int ret;
+
+ while (!vmw_lag_lt(queue, us)) {
+ spin_lock(&queue->lock);
+ if (list_empty(&queue->head))
+ sequence = atomic_read(&dev_priv->fence_seq);
+ else {
+ fence = list_first_entry(&queue->head,
+ struct vmw_fence, head);
+ sequence = fence->sequence;
+ }
+ spin_unlock(&queue->lock);
+
+ ret = vmw_wait_fence(dev_priv, false, sequence, true,
+ 3*HZ);
+
+ if (unlikely(ret != 0))
+ return ret;
+
+ (void) vmw_fence_pull(queue, sequence);
+ }
+ return 0;
+}
+
+
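
A small user-space sketch (not part of the patch) of the lag-based throttling that the new vmwgfx_fence.c implements: each submitted fence records its submission time, a signaled fence yields the observed lag, and a submitter keeps submitting only while that lag stays below its throttle threshold. The names and the single-fence flow below are simplifications for illustration.

/*
 * Illustrative model of fence-lag throttling. Not driver code;
 * all identifiers here are made up for the sketch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct model_fence {
	uint32_t sequence;
	struct timespec submitted;
};

static int64_t ts_diff_ns(struct timespec a, struct timespec b)
{
	return (int64_t)(a.tv_sec - b.tv_sec) * 1000000000LL +
	       (a.tv_nsec - b.tv_nsec);
}

/* Lag observed when a fence signals: how long it sat in the queue. */
static int64_t lag_on_signal(const struct model_fence *f)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return ts_diff_ns(now, f->submitted);
}

/* Throttle decision: keep submitting only while lag < threshold_us. */
static bool may_submit(int64_t lag_ns, uint32_t threshold_us)
{
	return lag_ns < (int64_t)threshold_us * 1000;
}

int main(void)
{
	struct model_fence f;
	int64_t lag;

	f.sequence = 1;
	clock_gettime(CLOCK_MONOTONIC, &f.submitted);

	/* Pretend the GPU signals the fence immediately. */
	lag = lag_on_signal(&f);
	printf("fence %u lag %lld ns, may_submit(1000us) = %d\n",
	       f.sequence, (long long)lag, may_submit(lag, 1000));
	return 0;
}
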
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 39d43a01d846..e6a1eb7ea954 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -34,6 +34,9 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
+ if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+ return false;
+
fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
@@ -48,6 +51,21 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
return true;
}
+bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t caps;
+
+ if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+ return false;
+
+ caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+ if (caps & SVGA_FIFO_CAP_PITCHLOCK)
+ return true;
+
+ return false;
+}
+
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -120,7 +138,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
-
+ vmw_fence_queue_init(&fifo->fence_queue);
return vmw_fifo_send_fence(dev_priv, &dummy);
out_err:
vfree(fifo->static_buffer);
@@ -159,6 +177,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
dev_priv->enable_state);
mutex_unlock(&dev_priv->hw_mutex);
+ vmw_fence_queue_takedown(&fifo->fence_queue);
if (likely(fifo->last_buffer != NULL)) {
vfree(fifo->last_buffer);
@@ -484,6 +503,8 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
fifo_state->last_buffer_add = true;
vmw_fifo_commit(dev_priv, bytes);
fifo_state->last_buffer_add = false;
+ (void) vmw_fence_push(&fifo_state->fence_queue, *sequence);
+ vmw_update_sequence(dev_priv, fifo_state);
out_err:
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 4d7cb5393860..e92298a6a383 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -64,22 +64,33 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
return (busy == 0);
}
+void vmw_update_sequence(struct vmw_private *dev_priv,
+ struct vmw_fifo_state *fifo_state)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+
+ uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+
+ if (dev_priv->last_read_sequence != sequence) {
+ dev_priv->last_read_sequence = sequence;
+ vmw_fence_pull(&fifo_state->fence_queue, sequence);
+ }
+}
bool vmw_fence_signaled(struct vmw_private *dev_priv,
uint32_t sequence)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
struct vmw_fifo_state *fifo_state;
bool ret;
if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
return true;
- dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+ fifo_state = &dev_priv->fifo;
+ vmw_update_sequence(dev_priv, fifo_state);
if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
return true;
- fifo_state = &dev_priv->fifo;
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
vmw_fifo_idle(dev_priv, sequence))
return true;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bbc7c4c30bc7..f1d626112415 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -30,6 +30,8 @@
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
@@ -326,6 +328,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
+ struct vmw_dma_buffer *buffer;
struct delayed_work d_work;
struct mutex work_lock;
bool present_fs;
@@ -500,8 +503,8 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
vfbs->base.base.depth = 24;
vfbs->base.base.width = width;
vfbs->base.base.height = height;
- vfbs->base.pin = NULL;
- vfbs->base.unpin = NULL;
+ vfbs->base.pin = &vmw_surface_dmabuf_pin;
+ vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
vfbs->surface = surface;
mutex_init(&vfbs->work_lock);
INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
@@ -589,6 +592,40 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
.create_handle = vmw_framebuffer_create_handle,
};
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
+{
+ struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+ struct vmw_framebuffer_surface *vfbs =
+ vmw_framebuffer_to_vfbs(&vfb->base);
+ unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
+ int ret;
+
+ vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
+ if (unlikely(vfbs->buffer == NULL))
+ return -ENOMEM;
+
+ vmw_overlay_pause_all(dev_priv);
+ ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
+ &vmw_vram_ne_placement,
+ false, &vmw_dmabuf_bo_free);
+ vmw_overlay_resume_all(dev_priv);
+
+ return ret;
+}
+
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
+{
+ struct ttm_buffer_object *bo;
+ struct vmw_framebuffer_surface *vfbs =
+ vmw_framebuffer_to_vfbs(&vfb->base);
+
+ bo = &vfbs->buffer->base;
+ ttm_bo_unref(&bo);
+ vfbs->buffer = NULL;
+
+ return 0;
+}
+
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
@@ -596,33 +633,15 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
vmw_framebuffer_to_vfbd(&vfb->base);
int ret;
+
vmw_overlay_pause_all(dev_priv);
ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
- if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
- vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-
- vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
- vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
- vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
- vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
- vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
- vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
- vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
- vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
- } else
- WARN_ON(true);
-
vmw_overlay_resume_all(dev_priv);
+ WARN_ON(ret != 0);
+
return 0;
}
@@ -668,7 +687,7 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
/* XXX get the first 3 from the surface info */
vfbd->base.base.bits_per_pixel = 32;
- vfbd->base.base.pitch = width * 32 / 4;
+ vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
vfbd->base.base.depth = 24;
vfbd->base.base.width = width;
vfbd->base.base.height = height;
@@ -765,8 +784,9 @@ int vmw_kms_init(struct vmw_private *dev_priv)
dev->mode_config.funcs = &vmw_kms_funcs;
dev->mode_config.min_width = 1;
dev->mode_config.min_height = 1;
- dev->mode_config.max_width = dev_priv->fb_max_width;
- dev->mode_config.max_height = dev_priv->fb_max_height;
+ /* assumed largest fb size */
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
ret = vmw_kms_init_legacy_display_system(dev_priv);
@@ -826,49 +846,140 @@ out:
return ret;
}
+void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+ unsigned width, unsigned height, unsigned pitch,
+ unsigned bbp, unsigned depth)
+{
+ if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+ vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
+ else if (vmw_fifo_have_pitchlock(vmw_priv))
+ iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
+ vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
+ vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bbp);
+ vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
+ vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
+ vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
+ vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+}
+
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
- /*
- * setup a single multimon monitor with the size
- * of 0x0, this stops the UI from resizing when we
- * change the framebuffer size
- */
- if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
- vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
- }
+ struct vmw_vga_topology_state *save;
+ uint32_t i;
vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
- vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
+ vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
- vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
+ vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
+ if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+ vmw_priv->vga_pitchlock =
+ vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
+ else if (vmw_fifo_have_pitchlock(vmw_priv))
+ vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
+ SVGA_FIFO_PITCHLOCK);
+
+ if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
+ return 0;
+ vmw_priv->num_displays = vmw_read(vmw_priv,
+ SVGA_REG_NUM_GUEST_DISPLAYS);
+
+ for (i = 0; i < vmw_priv->num_displays; ++i) {
+ save = &vmw_priv->vga_save[i];
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
+ save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
+ save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
+ save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
+ save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
+ save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ }
return 0;
}
int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
+ struct vmw_vga_topology_state *save;
+ uint32_t i;
+
vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
+ if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+ vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
+ vmw_priv->vga_pitchlock);
+ else if (vmw_fifo_have_pitchlock(vmw_priv))
+ iowrite32(vmw_priv->vga_pitchlock,
+ vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
+
+ if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
+ return 0;
- /* TODO check for multimon */
- vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
+ for (i = 0; i < vmw_priv->num_displays; ++i) {
+ save = &vmw_priv->vga_save[i];
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ }
return 0;
}
+
+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_update_layout_arg *arg =
+ (struct drm_vmw_update_layout_arg *)data;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ void __user *user_rects;
+ struct drm_vmw_rect *rects;
+ unsigned rects_size;
+ int ret;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (!arg->num_outputs) {
+ struct drm_vmw_rect def_rect = {0, 0, 800, 600};
+ vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect);
+ goto out_unlock;
+ }
+
+ rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
+ rects = kzalloc(rects_size, GFP_KERNEL);
+ if (unlikely(!rects)) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ user_rects = (void __user *)(unsigned long)arg->rects;
+ ret = copy_from_user(rects, user_rects, rects_size);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to get rects.\n");
+ goto out_free;
+ }
+
+ vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects);
+
+out_free:
+ kfree(rects);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 8b95249f0531..8a398a0339b6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -94,9 +94,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
/*
- * Legacy display unit functions - vmwgfx_ldu.h
+ * Legacy display unit functions - vmwgfx_ldu.c
*/
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
+int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
+ struct drm_vmw_rect *rects);
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 90891593bf6c..cfaf690a5b2f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -38,6 +38,7 @@ struct vmw_legacy_display {
struct list_head active;
unsigned num_active;
+ unsigned last_num_active;
struct vmw_framebuffer *fb;
};
@@ -48,9 +49,12 @@ struct vmw_legacy_display {
struct vmw_legacy_display_unit {
struct vmw_display_unit base;
- struct list_head active;
+ unsigned pref_width;
+ unsigned pref_height;
+ bool pref_active;
+ struct drm_display_mode *pref_mode;
- unsigned unit;
+ struct list_head active;
};
static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
@@ -88,23 +92,44 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
struct vmw_legacy_display *lds = dev_priv->ldu_priv;
struct vmw_legacy_display_unit *entry;
- struct drm_crtc *crtc;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_crtc *crtc = NULL;
int i = 0;
- /* to stop the screen from changing size on resize */
- vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
- for (i = 0; i < lds->num_active; i++) {
- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ /* If there is no display topology the host just assumes
+ * that the guest will set the same layout as the host.
+ */
+ if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) {
+ int w = 0, h = 0;
+ list_for_each_entry(entry, &lds->active, active) {
+ crtc = &entry->base.crtc;
+ w = max(w, crtc->x + crtc->mode.hdisplay);
+ h = max(h, crtc->y + crtc->mode.vdisplay);
+ i++;
+ }
+
+ if (crtc == NULL)
+ return 0;
+ fb = entry->base.crtc.fb;
+
+ vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
+ fb->bits_per_pixel, fb->depth);
+
+ return 0;
}
- /* Now set the mode */
- vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active);
+ if (!list_empty(&lds->active)) {
+ entry = list_entry(lds->active.next, typeof(*entry), active);
+ fb = entry->base.crtc.fb;
+
+ vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
+ fb->bits_per_pixel, fb->depth);
+ }
+
+ /* Make sure we always show something. */
+ vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS,
+ lds->num_active ? lds->num_active : 1);
+
i = 0;
list_for_each_entry(entry, &lds->active, active) {
crtc = &entry->base.crtc;
@@ -120,6 +145,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
i++;
}
+ BUG_ON(i != lds->num_active);
+
+ lds->last_num_active = lds->num_active;
+
return 0;
}
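When the host does not advertise SVGA_CAP_DISPLAY_TOPOLOGY, the commit path above collapses all active crtcs into a single screen whose size is the bounding box of their positions and modes. A standalone sketch of that arithmetic with made-up inputs (not driver code):

#include <stdio.h>

struct crtc_pos { int x, y, hdisplay, vdisplay; };

int main(void)
{
	/* two 800x600 outputs side by side, as the layout ioctl might arrange them */
	struct crtc_pos crtcs[] = { { 0, 0, 800, 600 }, { 800, 0, 800, 600 } };
	int w = 0, h = 0;

	for (unsigned i = 0; i < sizeof(crtcs) / sizeof(crtcs[0]); i++) {
		if (crtcs[i].x + crtcs[i].hdisplay > w)
			w = crtcs[i].x + crtcs[i].hdisplay;
		if (crtcs[i].y + crtcs[i].vdisplay > h)
			h = crtcs[i].y + crtcs[i].vdisplay;
	}
	printf("%dx%d\n", w, h);	/* prints 1600x600: one framebuffer spans both outputs */
	return 0;
}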
@@ -130,6 +159,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
if (list_empty(&ldu->active))
return 0;
+ /* Must init otherwise list_empty(&ldu->active) will not work. */
list_del_init(&ldu->active);
if (--(ld->num_active) == 0) {
BUG_ON(!ld->fb);
@@ -149,24 +179,29 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
struct vmw_legacy_display_unit *entry;
struct list_head *at;
+ BUG_ON(!ld->num_active && ld->fb);
+ if (vfb != ld->fb) {
+ if (ld->fb && ld->fb->unpin)
+ ld->fb->unpin(ld->fb);
+ if (vfb->pin)
+ vfb->pin(vfb);
+ ld->fb = vfb;
+ }
+
if (!list_empty(&ldu->active))
return 0;
at = &ld->active;
list_for_each_entry(entry, &ld->active, active) {
- if (entry->unit > ldu->unit)
+ if (entry->base.unit > ldu->base.unit)
break;
at = &entry->active;
}
list_add(&ldu->active, at);
- if (ld->num_active++ == 0) {
- BUG_ON(ld->fb);
- if (vfb->pin)
- vfb->pin(vfb);
- ld->fb = vfb;
- }
+
+ ld->num_active++;
return 0;
}
@@ -208,6 +243,8 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
/* ldu only supports one fb active at the time */
if (dev_priv->ldu_priv->fb && vfb &&
+ !(dev_priv->ldu_priv->num_active == 1 &&
+ !list_empty(&ldu->active)) &&
dev_priv->ldu_priv->fb != vfb) {
DRM_ERROR("Multiple framebuffers not supported\n");
return -EINVAL;
@@ -300,8 +337,7 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector)
static enum drm_connector_status
vmw_ldu_connector_detect(struct drm_connector *connector)
{
- /* XXX vmwctrl should control connection status */
- if (vmw_connector_to_ldu(connector)->base.unit == 0)
+ if (vmw_connector_to_ldu(connector)->pref_active)
return connector_status_connected;
return connector_status_disconnected;
}
@@ -312,10 +348,9 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = {
752, 800, 0, 480, 489, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 800x600@60Hz */
- { DRM_MODE("800x600",
- DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
- 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628,
- 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
@@ -387,10 +422,34 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = {
static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
uint32_t max_width, uint32_t max_height)
{
+ struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector);
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode = NULL;
+ struct drm_display_mode prefmode = { DRM_MODE("preferred",
+ DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+ };
int i;
+ /* Add preferred mode */
+ {
+ mode = drm_mode_duplicate(dev, &prefmode);
+ if (!mode)
+ return 0;
+ mode->hdisplay = ldu->pref_width;
+ mode->vdisplay = ldu->pref_height;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+ drm_mode_probed_add(connector, mode);
+
+ if (ldu->pref_mode) {
+ list_del_init(&ldu->pref_mode->head);
+ drm_mode_destroy(dev, ldu->pref_mode);
+ }
+
+ ldu->pref_mode = mode;
+ }
+
for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
vmw_ldu_connector_builtin[i].vdisplay > max_height)
@@ -443,18 +502,21 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
if (!ldu)
return -ENOMEM;
- ldu->unit = unit;
+ ldu->base.unit = unit;
crtc = &ldu->base.crtc;
encoder = &ldu->base.encoder;
connector = &ldu->base.connector;
+ INIT_LIST_HEAD(&ldu->active);
+
+ ldu->pref_active = (unit == 0);
+ ldu->pref_width = 800;
+ ldu->pref_height = 600;
+ ldu->pref_mode = NULL;
+
drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- /* Initial status */
- if (unit == 0)
- connector->status = connector_status_connected;
- else
- connector->status = connector_status_disconnected;
+ connector->status = vmw_ldu_connector_detect(connector);
drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_LVDS);
@@ -462,8 +524,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
- INIT_LIST_HEAD(&ldu->active);
-
drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
drm_connector_attach_property(connector,
@@ -487,18 +547,22 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
dev_priv->ldu_priv->num_active = 0;
+ dev_priv->ldu_priv->last_num_active = 0;
dev_priv->ldu_priv->fb = NULL;
drm_mode_create_dirty_info_property(dev_priv->dev);
vmw_ldu_init(dev_priv, 0);
- vmw_ldu_init(dev_priv, 1);
- vmw_ldu_init(dev_priv, 2);
- vmw_ldu_init(dev_priv, 3);
- vmw_ldu_init(dev_priv, 4);
- vmw_ldu_init(dev_priv, 5);
- vmw_ldu_init(dev_priv, 6);
- vmw_ldu_init(dev_priv, 7);
+ /* For old hardware without multimon, only enable one display. */
+ if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
+ vmw_ldu_init(dev_priv, 1);
+ vmw_ldu_init(dev_priv, 2);
+ vmw_ldu_init(dev_priv, 3);
+ vmw_ldu_init(dev_priv, 4);
+ vmw_ldu_init(dev_priv, 5);
+ vmw_ldu_init(dev_priv, 6);
+ vmw_ldu_init(dev_priv, 7);
+ }
return 0;
}
@@ -514,3 +578,42 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
return 0;
}
+
+int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
+ struct drm_vmw_rect *rects)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct vmw_legacy_display_unit *ldu;
+ struct drm_connector *con;
+ int i;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+#if 0
+ DRM_INFO("%s: new layout ", __func__);
+ for (i = 0; i < (int)num; i++)
+ DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
+ rects[i].w, rects[i].h);
+ DRM_INFO("\n");
+#else
+ (void)i;
+#endif
+
+ list_for_each_entry(con, &dev->mode_config.connector_list, head) {
+ ldu = vmw_connector_to_ldu(con);
+ if (num > ldu->base.unit) {
+ ldu->pref_width = rects[ldu->base.unit].w;
+ ldu->pref_height = rects[ldu->base.unit].h;
+ ldu->pref_active = true;
+ } else {
+ ldu->pref_width = 800;
+ ldu->pref_height = 600;
+ ldu->pref_active = false;
+ }
+ con->status = vmw_ldu_connector_detect(con);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index ad566c85b075..df2036ed18d5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -358,6 +358,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
if (stream->buf != buf)
stream->buf = vmw_dmabuf_reference(buf);
stream->saved = *arg;
+ /* stream is no longer stopped/paused */
+ stream->paused = false;
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 7745394c3e63..5f2d5df01e5c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -597,8 +597,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
ret = copy_from_user(srf->sizes, user_sizes,
srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
+ ret = -EFAULT;
goto out_err1;
+ }
if (srf->scanout &&
srf->num_sizes == 1 &&
@@ -697,9 +699,11 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
if (user_sizes)
ret = copy_to_user(user_sizes, srf->sizes,
srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
+ ret = -EFAULT;
+ }
out_bad_resource:
out_no_reference:
ttm_base_object_unref(&base);
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 441e38c95a85..b87569e96b16 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -1,12 +1,32 @@
/*
- * vgaarb.c
+ * vgaarb.c: Implements the VGA arbitration. For details refer to
+ * Documentation/vgaarbiter.txt
+ *
*
* (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
* (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
* (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
*
- * Implements the VGA arbitration. For details refer to
- * Documentation/vgaarbiter.txt
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
*/
#include <linux/module.h>
@@ -155,8 +175,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
(vgadev->decodes & VGA_RSRC_LEGACY_MEM))
rsrc |= VGA_RSRC_LEGACY_MEM;
- pr_devel("%s: %d\n", __func__, rsrc);
- pr_devel("%s: owns: %d\n", __func__, vgadev->owns);
+ pr_debug("%s: %d\n", __func__, rsrc);
+ pr_debug("%s: owns: %d\n", __func__, vgadev->owns);
/* Check what resources we need to acquire */
wants = rsrc & ~vgadev->owns;
@@ -268,7 +288,7 @@ static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
{
unsigned int old_locks = vgadev->locks;
- pr_devel("%s\n", __func__);
+ pr_debug("%s\n", __func__);
/* Update our counters, and account for equivalent legacy resources
* if we decode them
@@ -575,6 +595,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
else
vga_decode_count--;
}
+ pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
}
void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
@@ -831,7 +852,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
curr_pos += 5;
remaining -= 5;
- pr_devel("client 0x%p called 'lock'\n", priv);
+ pr_debug("client 0x%p called 'lock'\n", priv);
if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
ret_val = -EPROTO;
@@ -867,7 +888,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
curr_pos += 7;
remaining -= 7;
- pr_devel("client 0x%p called 'unlock'\n", priv);
+ pr_debug("client 0x%p called 'unlock'\n", priv);
if (strncmp(curr_pos, "all", 3) == 0)
io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
@@ -917,7 +938,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
curr_pos += 8;
remaining -= 8;
- pr_devel("client 0x%p called 'trylock'\n", priv);
+ pr_debug("client 0x%p called 'trylock'\n", priv);
if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
ret_val = -EPROTO;
@@ -961,7 +982,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
curr_pos += 7;
remaining -= 7;
- pr_devel("client 0x%p called 'target'\n", priv);
+ pr_debug("client 0x%p called 'target'\n", priv);
/* if target is default */
if (!strncmp(curr_pos, "default", 7))
pdev = pci_dev_get(vga_default_device());
@@ -971,11 +992,11 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
ret_val = -EPROTO;
goto done;
}
- pr_devel("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
+ pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
pbus = pci_find_bus(domain, bus);
- pr_devel("vgaarb: pbus %p\n", pbus);
+ pr_debug("vgaarb: pbus %p\n", pbus);
if (pbus == NULL) {
pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n",
domain, bus);
@@ -983,7 +1004,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
goto done;
}
pdev = pci_get_slot(pbus, devfn);
- pr_devel("vgaarb: pdev %p\n", pdev);
+ pr_debug("vgaarb: pdev %p\n", pdev);
if (!pdev) {
pr_err("vgaarb: invalid PCI address %x:%x\n",
bus, devfn);
@@ -993,7 +1014,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
}
vgadev = vgadev_find(pdev);
- pr_devel("vgaarb: vgadev %p\n", vgadev);
+ pr_debug("vgaarb: vgadev %p\n", vgadev);
if (vgadev == NULL) {
pr_err("vgaarb: this pci device is not a vga device\n");
pci_dev_put(pdev);
@@ -1029,7 +1050,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
} else if (strncmp(curr_pos, "decodes ", 8) == 0) {
curr_pos += 8;
remaining -= 8;
- pr_devel("vgaarb: client 0x%p called 'decodes'\n", priv);
+ pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv);
if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
ret_val = -EPROTO;
@@ -1058,7 +1079,7 @@ static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait)
{
struct vga_arb_private *priv = file->private_data;
- pr_devel("%s\n", __func__);
+ pr_debug("%s\n", __func__);
if (priv == NULL)
return -ENODEV;
@@ -1071,7 +1092,7 @@ static int vga_arb_open(struct inode *inode, struct file *file)
struct vga_arb_private *priv;
unsigned long flags;
- pr_devel("%s\n", __func__);
+ pr_debug("%s\n", __func__);
priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL);
if (priv == NULL)
@@ -1101,7 +1122,7 @@ static int vga_arb_release(struct inode *inode, struct file *file)
unsigned long flags;
int i;
- pr_devel("%s\n", __func__);
+ pr_debug("%s\n", __func__);
if (priv == NULL)
return -ENODEV;
@@ -1112,7 +1133,7 @@ static int vga_arb_release(struct inode *inode, struct file *file)
uc = &priv->cards[i];
if (uc->pdev == NULL)
continue;
- pr_devel("uc->io_cnt == %d, uc->mem_cnt == %d\n",
+ pr_debug("uc->io_cnt == %d, uc->mem_cnt == %d\n",
uc->io_cnt, uc->mem_cnt);
while (uc->io_cnt--)
vga_put(uc->pdev, VGA_RSRC_LEGACY_IO);
@@ -1165,7 +1186,7 @@ static int pci_notify(struct notifier_block *nb, unsigned long action,
struct pci_dev *pdev = to_pci_dev(dev);
bool notify = false;
- pr_devel("%s\n", __func__);
+ pr_debug("%s\n", __func__);
/* For now we're only interested in devices added and removed. I didn't
* test this thing here, so someone needs to double check for the
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 76ba59b9fea1..132278fa6240 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -347,6 +347,14 @@ config HID_QUANTA
---help---
Support for Quanta Optical Touch dual-touch panels.
+config HID_ROCCAT
+ tristate "Roccat special event support"
+ depends on USB_HID
+ ---help---
+ Support for Roccat special events.
+ Say Y here if you have a Roccat mouse or keyboard and want OSD or
+ macro execution support.
+
config HID_ROCCAT_KONE
tristate "Roccat Kone Mouse support"
depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 22e47eaeea32..987fa0627367 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_HID_QUANTA) += hid-quanta.o
obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
+obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o
obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e10e314d38cc..aa0f7dcabcd7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1301,6 +1301,7 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 56f314fbd4f9..c94026768570 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -811,7 +811,7 @@ static const char *relatives[REL_MAX + 1] = {
[REL_WHEEL] = "Wheel", [REL_MISC] = "Misc",
};
-static const char *absolutes[ABS_MAX + 1] = {
+static const char *absolutes[ABS_CNT] = {
[ABS_X] = "X", [ABS_Y] = "Y",
[ABS_Z] = "Z", [ABS_RX] = "Rx",
[ABS_RY] = "Ry", [ABS_RZ] = "Rz",
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index 62416e6baeca..3975e039c3dd 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -73,6 +73,7 @@ static int gyration_event(struct hid_device *hdev, struct hid_field *field,
static const struct hid_device_id gyration_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ }
};
MODULE_DEVICE_TABLE(hid, gyration_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9776896cc4fc..6af77ed0b555 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -282,6 +282,7 @@
#define USB_VENDOR_ID_GYRATION 0x0c16
#define USB_DEVICE_ID_GYRATION_REMOTE 0x0002
#define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003
+#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008
#define USB_VENDOR_ID_HAPP 0x078b
#define USB_DEVICE_ID_UGCI_DRIVING 0x0010
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 66e694054ba2..17f2dc04f883 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -37,6 +37,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include "hid-ids.h"
+#include "hid-roccat.h"
#include "hid-roccat-kone.h"
static void kone_set_settings_checksum(struct kone_settings *settings)
@@ -263,7 +264,7 @@ static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
return 0;
}
-static ssize_t kone_sysfs_read_settings(struct kobject *kobj,
+static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
struct device *dev = container_of(kobj, struct device, kobj);
@@ -287,7 +288,7 @@ static ssize_t kone_sysfs_read_settings(struct kobject *kobj,
* This function keeps values in kone_device up to date and assumes that in
* case of error the old data is still valid
*/
-static ssize_t kone_sysfs_write_settings(struct kobject *kobj,
+static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
struct device *dev = container_of(kobj, struct device, kobj);
@@ -342,31 +343,31 @@ static ssize_t kone_sysfs_read_profilex(struct kobject *kobj,
return count;
}
-static ssize_t kone_sysfs_read_profile1(struct kobject *kobj,
+static ssize_t kone_sysfs_read_profile1(struct file *fp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 1);
}
-static ssize_t kone_sysfs_read_profile2(struct kobject *kobj,
+static ssize_t kone_sysfs_read_profile2(struct file *fp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 2);
}
-static ssize_t kone_sysfs_read_profile3(struct kobject *kobj,
+static ssize_t kone_sysfs_read_profile3(struct file *fp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 3);
}
-static ssize_t kone_sysfs_read_profile4(struct kobject *kobj,
+static ssize_t kone_sysfs_read_profile4(struct file *fp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 4);
}
-static ssize_t kone_sysfs_read_profile5(struct kobject *kobj,
+static ssize_t kone_sysfs_read_profile5(struct file *fp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 5);
@@ -404,31 +405,31 @@ static ssize_t kone_sysfs_write_profilex(struct kobject *kobj,
return sizeof(struct kone_profile);
}
-static ssize_t kone_sysfs_write_profile1(struct kobject *kobj,
+static ssize_t kone_sysfs_write_profile1(struct file *fp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 1);
}
-static ssize_t kone_sysfs_write_profile2(struct kobject *kobj,
+static ssize_t kone_sysfs_write_profile2(struct file *fp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 2);
}
-static ssize_t kone_sysfs_write_profile3(struct kobject *kobj,
+static ssize_t kone_sysfs_write_profile3(struct file *fp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 3);
}
-static ssize_t kone_sysfs_write_profile4(struct kobject *kobj,
+static ssize_t kone_sysfs_write_profile4(struct file *fp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 4);
}
-static ssize_t kone_sysfs_write_profile5(struct kobject *kobj,
+static ssize_t kone_sysfs_write_profile5(struct file *fp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 5);
@@ -849,6 +850,16 @@ static int kone_init_specials(struct hid_device *hdev)
"couldn't init struct kone_device\n");
goto exit_free;
}
+
+ retval = roccat_connect(hdev);
+ if (retval < 0) {
+ dev_err(&hdev->dev, "couldn't init char dev\n");
+ /* be tolerant about not getting chrdev */
+ } else {
+ kone->roccat_claimed = 1;
+ kone->chrdev_minor = retval;
+ }
+
retval = kone_create_sysfs_attributes(intf);
if (retval) {
dev_err(&hdev->dev, "cannot create sysfs files\n");
@@ -868,10 +879,14 @@ exit_free:
static void kone_remove_specials(struct hid_device *hdev)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct kone_device *kone;
if (intf->cur_altsetting->desc.bInterfaceProtocol
== USB_INTERFACE_PROTOCOL_MOUSE) {
kone_remove_sysfs_attributes(intf);
+ kone = hid_get_drvdata(hdev);
+ if (kone->roccat_claimed)
+ roccat_disconnect(kone->chrdev_minor);
kfree(hid_get_drvdata(hdev));
}
}
@@ -930,6 +945,37 @@ static void kone_keep_values_up_to_date(struct kone_device *kone,
}
}
+static void kone_report_to_chrdev(struct kone_device const *kone,
+ struct kone_mouse_event const *event)
+{
+ struct kone_roccat_report roccat_report;
+
+ switch (event->event) {
+ case kone_mouse_event_switch_profile:
+ case kone_mouse_event_switch_dpi:
+ case kone_mouse_event_osd_profile:
+ case kone_mouse_event_osd_dpi:
+ roccat_report.event = event->event;
+ roccat_report.value = event->value;
+ roccat_report.key = 0;
+ roccat_report_event(kone->chrdev_minor,
+ (uint8_t *)&roccat_report,
+ sizeof(struct kone_roccat_report));
+ break;
+ case kone_mouse_event_call_overlong_macro:
+ if (event->value == kone_keystroke_action_press) {
+ roccat_report.event = kone_mouse_event_call_overlong_macro;
+ roccat_report.value = kone->actual_profile;
+ roccat_report.key = event->macro_key;
+ roccat_report_event(kone->chrdev_minor,
+ (uint8_t *)&roccat_report,
+ sizeof(struct kone_roccat_report));
+ }
+ break;
+ }
+
+}
+
/*
* Is called for keyboard- and mousepart.
* Only mousepart gets information about special events in its extended event
@@ -958,6 +1004,9 @@ static int kone_raw_event(struct hid_device *hdev, struct hid_report *report,
kone_keep_values_up_to_date(kone, event);
+ if (kone->roccat_claimed)
+ kone_report_to_chrdev(kone, event);
+
return 0; /* always do further processing */
}
diff --git a/drivers/hid/hid-roccat-kone.h b/drivers/hid/hid-roccat-kone.h
index b413b10a7f8a..003e6f81c195 100644
--- a/drivers/hid/hid-roccat-kone.h
+++ b/drivers/hid/hid-roccat-kone.h
@@ -189,6 +189,12 @@ enum kone_commands {
kone_command_firmware = 0xe5a
};
+struct kone_roccat_report {
+ uint8_t event;
+ uint8_t value; /* holds dpi or profile value */
+ uint8_t key; /* macro key on overlong macro execution */
+};
+
#pragma pack(pop)
struct kone_device {
@@ -219,6 +225,9 @@ struct kone_device {
* so it's read only once
*/
int firmware_version;
+
+ int roccat_claimed;
+ int chrdev_minor;
};
#endif
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
new file mode 100644
index 000000000000..e05d48edb66f
--- /dev/null
+++ b/drivers/hid/hid-roccat.c
@@ -0,0 +1,428 @@
+/*
+ * Roccat driver for Linux
+ *
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Module roccat is a char device used to report special events of roccat
+ * hardware to userland. These events include requests for on-screen-display of
+ * profile or dpi settings or requests for execution of macro sequences that are
+ * not stored in the device. The information in these events depends on the hid
+ * device implementation and contains data that is not available in a single hid
+ * event, or else hidraw could have been used.
+ * It is inspired by hidraw, but uses only one circular buffer for all readers.
+ */
+
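A minimal userland consumer, given only as a sketch: the device node name is an assumption derived from the device_create() call in roccat_connect() below ("roccat" plus the hid driver name plus the minor), and the report layout mirrors struct kone_roccat_report added to hid-roccat-kone.h in this patch.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct kone_roccat_report {            /* mirrors hid-roccat-kone.h */
	uint8_t event;
	uint8_t value;                 /* dpi or profile number */
	uint8_t key;                   /* macro key for overlong macros */
};

int main(void)
{
	/* assumed node name; the real one depends on how udev names the class device */
	int fd = open("/dev/roccatkone0", O_RDONLY);
	struct kone_roccat_report report;

	if (fd < 0)
		return 1;
	/* each read() delivers at most one queued report; see roccat_read() */
	while (read(fd, &report, sizeof(report)) == sizeof(report))
		printf("event %u value %u key %u\n",
		       report.event, report.value, report.key);
	close(fd);
	return 0;
}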
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+
+#include "hid-roccat.h"
+
+#define ROCCAT_FIRST_MINOR 0
+#define ROCCAT_MAX_DEVICES 8
+
+/* should be a power of 2 for performance reasons */
+#define ROCCAT_CBUF_SIZE 16
+
+struct roccat_report {
+ uint8_t *value;
+ int len;
+};
+
+struct roccat_device {
+ unsigned int minor;
+ int open;
+ int exist;
+ wait_queue_head_t wait;
+ struct device *dev;
+ struct hid_device *hid;
+ struct list_head readers;
+ /* protects modifications of readers list */
+ struct mutex readers_lock;
+
+ /*
+ * circular_buffer has one writer and multiple readers with their own
+ * read pointers
+ */
+ struct roccat_report cbuf[ROCCAT_CBUF_SIZE];
+ int cbuf_end;
+ struct mutex cbuf_lock;
+};
+
+struct roccat_reader {
+ struct list_head node;
+ struct roccat_device *device;
+ int cbuf_start;
+};
+
+static int roccat_major;
+static struct class *roccat_class;
+static struct cdev roccat_cdev;
+
+static struct roccat_device *devices[ROCCAT_MAX_DEVICES];
+/* protects modifications of devices array */
+static DEFINE_MUTEX(devices_lock);
+
+static ssize_t roccat_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct roccat_reader *reader = file->private_data;
+ struct roccat_device *device = reader->device;
+ struct roccat_report *report;
+ ssize_t retval = 0, len;
+ DECLARE_WAITQUEUE(wait, current);
+
+ mutex_lock(&device->cbuf_lock);
+
+ /* no data? */
+ if (reader->cbuf_start == device->cbuf_end) {
+ add_wait_queue(&device->wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ /* wait for data */
+ while (reader->cbuf_start == device->cbuf_end) {
+ if (file->f_flags & O_NONBLOCK) {
+ retval = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+ if (!device->exist) {
+ retval = -EIO;
+ break;
+ }
+
+ mutex_unlock(&device->cbuf_lock);
+ schedule();
+ mutex_lock(&device->cbuf_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&device->wait, &wait);
+ }
+
+ /* at this point we either have data, or retval holds the reason to return */
+ if (retval)
+ goto exit_unlock;
+
+ report = &device->cbuf[reader->cbuf_start];
+ /*
+ * If report is larger than requested amount of data, rest of report
+ * is lost!
+ */
+ len = report->len > count ? count : report->len;
+
+ if (copy_to_user(buffer, report->value, len)) {
+ retval = -EFAULT;
+ goto exit_unlock;
+ }
+ retval += len;
+ reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE;
+
+exit_unlock:
+ mutex_unlock(&device->cbuf_lock);
+ return retval;
+}
+
+static unsigned int roccat_poll(struct file *file, poll_table *wait)
+{
+ struct roccat_reader *reader = file->private_data;
+ poll_wait(file, &reader->device->wait, wait);
+ if (reader->cbuf_start != reader->device->cbuf_end)
+ return POLLIN | POLLRDNORM;
+ if (!reader->device->exist)
+ return POLLERR | POLLHUP;
+ return 0;
+}
+
+static int roccat_open(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ struct roccat_reader *reader;
+ struct roccat_device *device;
+ int error = 0;
+
+ reader = kzalloc(sizeof(struct roccat_reader), GFP_KERNEL);
+ if (!reader)
+ return -ENOMEM;
+
+ mutex_lock(&devices_lock);
+
+ device = devices[minor];
+
+ /* check before taking device->readers_lock to avoid a NULL dereference */
+ if (!device) {
+ printk(KERN_EMERG "roccat device with minor %d doesn't exist\n",
+ minor);
+ error = -ENODEV;
+ mutex_unlock(&devices_lock);
+ kfree(reader);
+ return error;
+ }
+
+ mutex_lock(&device->readers_lock);
+
+ if (!device->open++) {
+ /* power on device on adding first reader */
+ if (device->hid->ll_driver->power) {
+ error = device->hid->ll_driver->power(device->hid,
+ PM_HINT_FULLON);
+ if (error < 0) {
+ --device->open;
+ goto exit_unlock;
+ }
+ }
+ error = device->hid->ll_driver->open(device->hid);
+ if (error < 0) {
+ if (device->hid->ll_driver->power)
+ device->hid->ll_driver->power(device->hid,
+ PM_HINT_NORMAL);
+ --device->open;
+ goto exit_unlock;
+ }
+ }
+
+ reader->device = device;
+ /* new reader doesn't get old events */
+ reader->cbuf_start = device->cbuf_end;
+
+ list_add_tail(&reader->node, &device->readers);
+ file->private_data = reader;
+
+exit_unlock:
+ mutex_unlock(&device->readers_lock);
+ mutex_unlock(&devices_lock);
+ return error;
+}
+
+static int roccat_release(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ struct roccat_reader *reader = file->private_data;
+ struct roccat_device *device;
+
+ mutex_lock(&devices_lock);
+
+ device = devices[minor];
+ if (!device) {
+ mutex_unlock(&devices_lock);
+ printk(KERN_EMERG "roccat device with minor %d doesn't exist\n",
+ minor);
+ return -ENODEV;
+ }
+
+ mutex_lock(&device->readers_lock);
+ list_del(&reader->node);
+ mutex_unlock(&device->readers_lock);
+ kfree(reader);
+
+ if (!--device->open) {
+ /* removing last reader */
+ if (device->exist) {
+ if (device->hid->ll_driver->power)
+ device->hid->ll_driver->power(device->hid,
+ PM_HINT_NORMAL);
+ device->hid->ll_driver->close(device->hid);
+ } else {
+ kfree(device);
+ }
+ }
+
+ mutex_unlock(&devices_lock);
+
+ return 0;
+}
+
+/*
+ * roccat_report_event() - output data to readers
+ * @minor: minor device number returned by roccat_connect()
+ * @data: pointer to data
+ * @len: size of data
+ *
+ * Return value is zero on success, a negative error code on failure.
+ *
+ * This is called from interrupt handler.
+ */
+int roccat_report_event(int minor, u8 const *data, int len)
+{
+ struct roccat_device *device;
+ struct roccat_reader *reader;
+ struct roccat_report *report;
+ uint8_t *new_value;
+
+ new_value = kmemdup(data, len, GFP_ATOMIC);
+ if (!new_value)
+ return -ENOMEM;
+
+ device = devices[minor];
+
+ report = &device->cbuf[device->cbuf_end];
+
+ /* passing NULL is safe */
+ kfree(report->value);
+
+ report->value = new_value;
+ report->len = len;
+ device->cbuf_end = (device->cbuf_end + 1) % ROCCAT_CBUF_SIZE;
+
+ list_for_each_entry(reader, &device->readers, node) {
+ /*
+ * As we already inserted one element, the buffer can't be
+ * empty. If start and end are equal, buffer is full and we
+ * increase start, so that slow reader misses one event, but
+ * gets the newer ones in the right order.
+ */
+ if (reader->cbuf_start == device->cbuf_end)
+ reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE;
+ }
+
+ wake_up_interruptible(&device->wait);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(roccat_report_event);
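The writer in roccat_report_event() advances a single end index while every reader keeps its own start index; when the writer catches up with a reader, that reader's start is pushed forward so it silently drops its oldest unread event. A standalone sketch of the index arithmetic under the same 16-slot size (the numbers in the comments were worked out by hand, not taken from the driver):

#include <stdio.h>

#define CBUF_SIZE 16

static int cbuf_end;            /* writer position, next slot to fill */
static int reader_start;        /* one reader's read position */

static void push_event(void)
{
	cbuf_end = (cbuf_end + 1) % CBUF_SIZE;
	/* buffer full for this reader: skip its oldest unread event */
	if (reader_start == cbuf_end)
		reader_start = (reader_start + 1) % CBUF_SIZE;
}

int main(void)
{
	for (int i = 0; i < 20; i++)     /* overfill a 16-slot buffer */
		push_event();
	/* the reader has dropped its 5 oldest events and still has the newest 15 queued */
	printf("end=%d start=%d\n", cbuf_end, reader_start);   /* prints: end=4 start=5 */
	return 0;
}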
+
+/*
+ * roccat_connect() - create a char device for special event output
+ * @hid: the hid device the char device should be connected to.
+ *
+ * Return value is the minor device number in the range [0, ROCCAT_MAX_DEVICES - 1] on
+ * success, a negative error code on failure.
+ */
+int roccat_connect(struct hid_device *hid)
+{
+ unsigned int minor;
+ struct roccat_device *device;
+ int temp;
+
+ device = kzalloc(sizeof(struct roccat_device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ mutex_lock(&devices_lock);
+
+ for (minor = 0; minor < ROCCAT_MAX_DEVICES; ++minor) {
+ if (devices[minor])
+ continue;
+ break;
+ }
+
+ if (minor < ROCCAT_MAX_DEVICES) {
+ devices[minor] = device;
+ } else {
+ mutex_unlock(&devices_lock);
+ kfree(device);
+ return -EINVAL;
+ }
+
+ device->dev = device_create(roccat_class, &hid->dev,
+ MKDEV(roccat_major, minor), NULL,
+ "%s%s%d", "roccat", hid->driver->name, minor);
+
+ if (IS_ERR(device->dev)) {
+ devices[minor] = NULL;
+ mutex_unlock(&devices_lock);
+ temp = PTR_ERR(device->dev);
+ kfree(device);
+ return temp;
+ }
+
+ mutex_unlock(&devices_lock);
+
+ init_waitqueue_head(&device->wait);
+ INIT_LIST_HEAD(&device->readers);
+ mutex_init(&device->readers_lock);
+ mutex_init(&device->cbuf_lock);
+ device->minor = minor;
+ device->hid = hid;
+ device->exist = 1;
+ device->cbuf_end = 0;
+
+ return minor;
+}
+EXPORT_SYMBOL_GPL(roccat_connect);
+
+/* roccat_disconnect() - remove char device from hid device
+ * @minor: the minor device number returned by roccat_connect()
+ */
+void roccat_disconnect(int minor)
+{
+ struct roccat_device *device;
+
+ mutex_lock(&devices_lock);
+ device = devices[minor];
+ devices[minor] = NULL;
+ mutex_unlock(&devices_lock);
+
+ device->exist = 0; /* TODO exist maybe not needed */
+
+ device_destroy(roccat_class, MKDEV(roccat_major, minor));
+
+ if (device->open) {
+ device->hid->ll_driver->close(device->hid);
+ wake_up_interruptible(&device->wait);
+ } else {
+ kfree(device);
+ }
+}
+EXPORT_SYMBOL_GPL(roccat_disconnect);
+
+static const struct file_operations roccat_ops = {
+ .owner = THIS_MODULE,
+ .read = roccat_read,
+ .poll = roccat_poll,
+ .open = roccat_open,
+ .release = roccat_release,
+};
+
+static int __init roccat_init(void)
+{
+ int retval;
+ dev_t dev_id;
+
+ retval = alloc_chrdev_region(&dev_id, ROCCAT_FIRST_MINOR,
+ ROCCAT_MAX_DEVICES, "roccat");
+
+ roccat_major = MAJOR(dev_id);
+
+ if (retval < 0) {
+ printk(KERN_WARNING "roccat: can't get major number\n");
+ return retval;
+ }
+
+ roccat_class = class_create(THIS_MODULE, "roccat");
+ if (IS_ERR(roccat_class)) {
+ retval = PTR_ERR(roccat_class);
+ unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES);
+ return retval;
+ }
+
+ cdev_init(&roccat_cdev, &roccat_ops);
+ cdev_add(&roccat_cdev, dev_id, ROCCAT_MAX_DEVICES);
+
+ return 0;
+}
+
+static void __exit roccat_exit(void)
+{
+ dev_t dev_id = MKDEV(roccat_major, 0);
+
+ cdev_del(&roccat_cdev);
+ class_destroy(roccat_class);
+ unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES);
+}
+
+module_init(roccat_init);
+module_exit(roccat_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat char device");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat.h b/drivers/hid/hid-roccat.h
new file mode 100644
index 000000000000..d8aae0c1fa7e
--- /dev/null
+++ b/drivers/hid/hid-roccat.h
@@ -0,0 +1,31 @@
+#ifndef __HID_ROCCAT_H
+#define __HID_ROCCAT_H
+
+/*
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/hid.h>
+#include <linux/types.h>
+
+#if defined(CONFIG_HID_ROCCAT) || defined (CONFIG_HID_ROCCAT_MODULE)
+int roccat_connect(struct hid_device *hid);
+void roccat_disconnect(int minor);
+int roccat_report_event(int minor, u8 const *data, int len);
+#else
+static inline int roccat_connect(struct hid_device *hid) { return -1; }
+static inline void roccat_disconnect(int minor) {}
+static inline int roccat_report_event(int minor, u8 const *data, int len)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 9be8e1754a0b..e19cf8eb6ccf 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -447,13 +447,14 @@ config SENSORS_IT87
will be called it87.
config SENSORS_LM63
- tristate "National Semiconductor LM63"
+ tristate "National Semiconductor LM63 and LM64"
depends on I2C
help
- If you say yes here you get support for the National Semiconductor
- LM63 remote diode digital temperature sensor with integrated fan
- control. Such chips are found on the Tyan S4882 (Thunder K8QS Pro)
- motherboard, among others.
+ If you say yes here you get support for the National
+ Semiconductor LM63 and LM64 remote diode digital temperature
+ sensors with integrated fan control. Such chips are found
+ on the Tyan S4882 (Thunder K8QS Pro) motherboard, among
+ others.
This driver can also be built as a module. If so, the module
will be called lm63.
@@ -492,7 +493,8 @@ config SENSORS_LM75
- NXP's LM75A
- ST Microelectronics STDS75
- TelCom (now Microchip) TCN75
- - Texas Instruments TMP100, TMP101, TMP75, TMP175, TMP275
+ - Texas Instruments TMP100, TMP101, TMP105, TMP75, TMP175,
+ TMP275
This driver supports driver model based binding through board
specific I2C device tables.
@@ -749,6 +751,16 @@ config SENSORS_DME1737
This driver can also be built as a module. If so, the module
will be called dme1737.
+config SENSORS_EMC1403
+ tristate "SMSC EMC1403 thermal sensor"
+ depends on I2C
+ help
+ If you say yes here you get support for the SMSC EMC1403
+ temperature monitoring chip.
+
+ Threshold values can be configured using sysfs.
+ Data from the different diodes are accessible via sysfs.
+
config SENSORS_SMSC47M1
tristate "SMSC LPC47M10x and compatibles"
help
@@ -802,6 +814,15 @@ config SENSORS_ADS7828
This driver can also be built as a module. If so, the module
will be called ads7828.
+config SENSORS_ADS7871
+ tristate "Texas Instruments ADS7871 A/D converter"
+ depends on SPI
+ help
+ If you say yes here you get support for TI ADS7871 & ADS7870
+
+ This driver can also be built as a module. If so, the module
+ will be called ads7871.
+
config SENSORS_AMC6821
tristate "Texas Instruments AMC6821"
depends on I2C && EXPERIMENTAL
@@ -822,6 +843,16 @@ config SENSORS_THMC50
This driver can also be built as a module. If so, the module
will be called thmc50.
+config SENSORS_TMP102
+ tristate "Texas Instruments TMP102"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for Texas Instruments TMP102
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called tmp102.
+
config SENSORS_TMP401
tristate "Texas Instruments TMP401 and compatibles"
depends on I2C && EXPERIMENTAL
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 4aa1a3d112ad..2138ceb1a713 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o
obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o
obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o
obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o
+obj-$(CONFIG_SENSORS_ADS7871) += ads7871.o
obj-$(CONFIG_SENSORS_ADT7411) += adt7411.o
obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o
obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o
@@ -40,6 +41,7 @@ obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
obj-$(CONFIG_SENSORS_DME1737) += dme1737.o
obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
+obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
obj-$(CONFIG_SENSORS_F71805F) += f71805f.o
obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o
obj-$(CONFIG_SENSORS_F75375S) += f75375s.o
@@ -89,6 +91,7 @@ obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
obj-$(CONFIG_SENSORS_AMC6821) += amc6821.o
obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
+obj-$(CONFIG_SENSORS_TMP102) += tmp102.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 1644b92e7cc4..15c1a9616af3 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -36,6 +36,7 @@
#define ADM1031_REG_FAN_DIV(nr) (0x20 + (nr))
#define ADM1031_REG_PWM (0x22)
#define ADM1031_REG_FAN_MIN(nr) (0x10 + (nr))
+#define ADM1031_REG_FAN_FILTER (0x23)
#define ADM1031_REG_TEMP_OFFSET(nr) (0x0d + (nr))
#define ADM1031_REG_TEMP_MAX(nr) (0x14 + 4 * (nr))
@@ -61,6 +62,9 @@
#define ADM1031_CONF2_TACH2_ENABLE 0x08
#define ADM1031_CONF2_TEMP_ENABLE(chan) (0x10 << (chan))
+#define ADM1031_UPDATE_RATE_MASK 0x1c
+#define ADM1031_UPDATE_RATE_SHIFT 2
+
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
@@ -75,6 +79,7 @@ struct adm1031_data {
int chip_type;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
+ unsigned int update_rate; /* In milliseconds */
/* The chan_select_table contains the possible configurations for
* auto fan control.
*/
@@ -738,6 +743,57 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
+/* Update Rate */
+static const unsigned int update_rates[] = {
+ 16000, 8000, 4000, 2000, 1000, 500, 250, 125,
+};
+
+static ssize_t show_update_rate(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adm1031_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%u\n", data->update_rate);
+}
+
+static ssize_t set_update_rate(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adm1031_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+ int i, err;
+ u8 reg;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ /* find the nearest update rate from the table */
+ for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) {
+ if (val >= update_rates[i])
+ break;
+ }
+ /* if not found, we point to the last entry (lowest update rate) */
+
+ /* set the new update rate while preserving other settings */
+ reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
+ reg &= ~ADM1031_UPDATE_RATE_MASK;
+ reg |= i << ADM1031_UPDATE_RATE_SHIFT;
+ adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg);
+
+ mutex_lock(&data->update_lock);
+ data->update_rate = update_rates[i];
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate,
+ set_update_rate);
+
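set_update_rate() walks the descending table and stops at the first entry the request still covers, so a request is rounded down to the nearest supported rate and anything below 125 ms falls through to the last entry. A standalone sketch of that selection (the sample inputs are made up):

#include <stdio.h>

static const unsigned int update_rates[] = {
	16000, 8000, 4000, 2000, 1000, 500, 250, 125,
};

static unsigned int pick_rate(unsigned long val)
{
	unsigned int i;

	/* first (i.e. largest) table entry not above the request wins */
	for (i = 0; i < sizeof(update_rates) / sizeof(update_rates[0]) - 1; i++)
		if (val >= update_rates[i])
			break;
	return update_rates[i];	/* falls through to 125 ms for tiny requests */
}

int main(void)
{
	printf("%u %u %u\n", pick_rate(3000), pick_rate(20000), pick_rate(60));
	/* prints: 2000 16000 125 */
	return 0;
}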
static struct attribute *adm1031_attributes[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan1_div.dev_attr.attr,
@@ -774,6 +830,7 @@ static struct attribute *adm1031_attributes[] = {
&sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr,
+ &dev_attr_update_rate.attr,
&dev_attr_alarms.attr,
NULL
@@ -900,6 +957,7 @@ static void adm1031_init_client(struct i2c_client *client)
{
unsigned int read_val;
unsigned int mask;
+ int i;
struct adm1031_data *data = i2c_get_clientdata(client);
mask = (ADM1031_CONF2_PWM1_ENABLE | ADM1031_CONF2_TACH1_ENABLE);
@@ -919,18 +977,24 @@ static void adm1031_init_client(struct i2c_client *client)
ADM1031_CONF1_MONITOR_ENABLE);
}
+ /* Read the chip's update rate */
+ mask = ADM1031_UPDATE_RATE_MASK;
+ read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
+ i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT;
+ data->update_rate = update_rates[i];
}
static struct adm1031_data *adm1031_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
+ unsigned long next_update;
int chan;
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
+ next_update = data->last_updated + msecs_to_jiffies(data->update_rate);
+ if (time_after(jiffies, next_update) || !data->valid) {
dev_dbg(&client->dev, "Starting adm1031 update\n");
for (chan = 0;
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
new file mode 100644
index 000000000000..b300a2048af1
--- /dev/null
+++ b/drivers/hwmon/ads7871.c
@@ -0,0 +1,253 @@
+/*
+ * ads7871 - driver for TI ADS7871 A/D converter
+ *
+ * Copyright (c) 2010 Paul Thomas <pthomas8589@gmail.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * later as published by the Free Software Foundation.
+ *
+ * You need to have something like this in struct spi_board_info
+ * {
+ * .modalias = "ads7871",
+ * .max_speed_hz = 2*1000*1000,
+ * .chip_select = 0,
+ * .bus_num = 1,
+ * },
+ */
+
+/*From figure 18 in the datasheet*/
+/*Register addresses*/
+#define REG_LS_BYTE 0 /*A/D Output Data, LS Byte*/
+#define REG_MS_BYTE 1 /*A/D Output Data, MS Byte*/
+#define REG_PGA_VALID 2 /*PGA Valid Register*/
+#define REG_AD_CONTROL 3 /*A/D Control Register*/
+#define REG_GAIN_MUX 4 /*Gain/Mux Register*/
+#define REG_IO_STATE 5 /*Digital I/O State Register*/
+#define REG_IO_CONTROL 6 /*Digital I/O Control Register*/
+#define REG_OSC_CONTROL 7 /*Rev/Oscillator Control Register*/
+#define REG_SER_CONTROL 24 /*Serial Interface Control Register*/
+#define REG_ID 31 /*ID Register*/
+
+/*From figure 17 in the datasheet
+* These bits get ORed with the address to form
+* the instruction byte */
+/*Instruction Bit masks*/
+#define INST_MODE_bm (1<<7)
+#define INST_READ_bm (1<<6)
+#define INST_16BIT_bm (1<<5)
+
+/*From figure 18 in the datasheet*/
+/*bit masks for Rev/Oscillator Control Register*/
+#define MUX_CNV_bv 7
+#define MUX_CNV_bm (1<<MUX_CNV_bv)
+#define MUX_M3_bm (1<<3) /*M3 selects single ended*/
+#define MUX_G_bv 4 /*allows for reg = (gain << MUX_G_bv) | ...*/
+
+/*From figure 18 in the datasheet*/
+/*bit masks for Rev/Oscillator Control Register*/
+#define OSC_OSCR_bm (1<<5)
+#define OSC_OSCE_bm (1<<4)
+#define OSC_REFE_bm (1<<3)
+#define OSC_BUFE_bm (1<<2)
+#define OSC_R2V_bm (1<<1)
+#define OSC_RBG_bm (1<<0)
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+
+#define DEVICE_NAME "ads7871"
+
+struct ads7871_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+};
+
+static int ads7871_read_reg8(struct spi_device *spi, int reg)
+{
+ int ret;
+ reg = reg | INST_READ_bm;
+ ret = spi_w8r8(spi, reg);
+ return ret;
+}
+
+static int ads7871_read_reg16(struct spi_device *spi, int reg)
+{
+ int ret;
+ reg = reg | INST_READ_bm | INST_16BIT_bm;
+ ret = spi_w8r16(spi, reg);
+ return ret;
+}
+
+static int ads7871_write_reg8(struct spi_device *spi, int reg, u8 val)
+{
+ u8 tmp[2] = {reg, val};
+ return spi_write(spi, tmp, sizeof(tmp));
+}
+
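The read helpers above only OR the flag bits from figure 17 of the datasheet into the register address. A standalone check of the two instruction bytes the driver actually sends, with the constants copied from the defines above:

#include <assert.h>

#define INST_READ_bm  (1 << 6)
#define INST_16BIT_bm (1 << 5)
#define REG_LS_BYTE   0
#define REG_GAIN_MUX  4

int main(void)
{
	/* 8-bit read of the gain/mux register, as in ads7871_read_reg8() */
	assert((REG_GAIN_MUX | INST_READ_bm) == 0x44);
	/* 16-bit read of the conversion result, as in ads7871_read_reg16() */
	assert((REG_LS_BYTE | INST_READ_bm | INST_16BIT_bm) == 0x60);
	return 0;
}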
+static ssize_t show_voltage(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int ret, val, i = 0;
+ uint8_t channel, mux_cnv;
+
+ channel = attr->index;
+ /*TODO: add support for conversions
+ *other than single ended with a gain of 1*/
+ /*MUX_M3_bm forces single ended*/
+ /*This is also where the gain of the PGA would be set*/
+ ads7871_write_reg8(spi, REG_GAIN_MUX,
+ (MUX_CNV_bm | MUX_M3_bm | channel));
+
+ ret = ads7871_read_reg8(spi, REG_GAIN_MUX);
+ mux_cnv = ((ret & MUX_CNV_bm)>>MUX_CNV_bv);
+ /*on 400MHz arm9 platform the conversion
+ *is already done when we do this test*/
+ while ((i < 2) && mux_cnv) {
+ i++;
+ ret = ads7871_read_reg8(spi, REG_GAIN_MUX);
+ mux_cnv = ((ret & MUX_CNV_bm)>>MUX_CNV_bv);
+ msleep_interruptible(1);
+ }
+
+ if (mux_cnv == 0) {
+ val = ads7871_read_reg16(spi, REG_LS_BYTE);
+ /*result in volts*10000 = (val/8192)*2.5*10000*/
+ val = ((val>>2) * 25000) / 8192;
+ return sprintf(buf, "%d\n", val);
+ } else {
+ return -1;
+ }
+}
+
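The scaling line in show_voltage() turns the raw 16-bit register read into the driver's volts*10000 unit against the 2.5 V reference. A standalone check of that arithmetic with an assumed mid-scale raw reading of 0x4000:

#include <assert.h>

int main(void)
{
	int raw = 0x4000;                 /* assumed 16-bit value read from REG_LS_BYTE */
	int val = ((raw >> 2) * 25000) / 8192;

	/* (0x4000 >> 2) == 4096; 4096 * 25000 / 8192 == 12500, i.e. 1.25 V
	 * in the driver's volts*10000 unit (half of the 2.5 V reference). */
	assert(val == 12500);
	return 0;
}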
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0);
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_voltage, NULL, 3);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_voltage, NULL, 4);
+static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5);
+static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6);
+static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7);
+
+static struct attribute *ads7871_attributes[] = {
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_in7_input.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group ads7871_group = {
+ .attrs = ads7871_attributes,
+};
+
+static int __devinit ads7871_probe(struct spi_device *spi)
+{
+ int status, ret, err = 0;
+ uint8_t val;
+ struct ads7871_data *pdata;
+
+ dev_dbg(&spi->dev, "probe\n");
+
+ pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
+ if (!pdata) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ status = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
+ if (status < 0)
+ goto error_free;
+
+ pdata->hwmon_dev = hwmon_device_register(&spi->dev);
+ if (IS_ERR(pdata->hwmon_dev)) {
+ err = PTR_ERR(pdata->hwmon_dev);
+ goto error_remove;
+ }
+
+ spi_set_drvdata(spi, pdata);
+
+ /* Configure the SPI bus */
+ spi->mode = (SPI_MODE_0);
+ spi->bits_per_word = 8;
+ spi_setup(spi);
+
+ ads7871_write_reg8(spi, REG_SER_CONTROL, 0);
+ ads7871_write_reg8(spi, REG_AD_CONTROL, 0);
+
+ val = (OSC_OSCR_bm | OSC_OSCE_bm | OSC_REFE_bm | OSC_BUFE_bm);
+ ads7871_write_reg8(spi, REG_OSC_CONTROL, val);
+ ret = ads7871_read_reg8(spi, REG_OSC_CONTROL);
+
+ dev_dbg(&spi->dev, "REG_OSC_CONTROL write:%x, read:%x\n", val, ret);
+ /*because there is no other error checking on an SPI bus
+ we need to make sure we really have a chip*/
+ if (val != ret) {
+ err = -ENODEV;
+ goto error_remove;
+ }
+
+ return 0;
+
+error_remove:
+ sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
+error_free:
+ kfree(pdata);
+exit:
+ return err;
+}
+
+static int __devexit ads7871_remove(struct spi_device *spi)
+{
+ struct ads7871_data *pdata = spi_get_drvdata(spi);
+
+ hwmon_device_unregister(pdata->hwmon_dev);
+ sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
+ kfree(pdata);
+ return 0;
+}
+
+static struct spi_driver ads7871_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+
+ .probe = ads7871_probe,
+ .remove = __devexit_p(ads7871_remove),
+};
+
+static int __init ads7871_init(void)
+{
+ return spi_register_driver(&ads7871_driver);
+}
+
+static void __exit ads7871_exit(void)
+{
+ spi_unregister_driver(&ads7871_driver);
+}
+
+module_init(ads7871_init);
+module_exit(ads7871_exit);
+
+MODULE_AUTHOR("Paul Thomas <pthomas8589@gmail.com>");
+MODULE_DESCRIPTION("TI ADS7871 A/D driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index 4086c7257f91..f13c843a2964 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -316,7 +316,6 @@ static int __devinit adt7411_probe(struct i2c_client *client,
exit_remove:
sysfs_remove_group(&client->dev.kobj, &adt7411_attr_grp);
exit_free:
- i2c_set_clientdata(client, NULL);
kfree(data);
return ret;
}
@@ -327,7 +326,6 @@ static int __devexit adt7411_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &adt7411_attr_grp);
- i2c_set_clientdata(client, NULL);
kfree(data);
return 0;
}
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index f085c18d2905..b6598aa557a0 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -148,6 +148,20 @@ static const char *temperature_sensors_sets[][41] = {
/* Set 18: MacBook Pro 2,2 */
{ "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0",
"Th0H", "Th1H", "Tm0P", "Ts0P", NULL },
+/* Set 19: MacBook Pro 5,3 */
+ { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TG0D",
+ "TG0F", "TG0H", "TG0P", "TG0T", "TN0D", "TN0P", "TTF0", "Th2H",
+ "Tm0P", "Ts0P", "Ts0S", NULL },
+/* Set 20: MacBook Pro 5,4 */
+ { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TN0D",
+ "TN0P", "TTF0", "Th2H", "Ts0P", "Ts0S", NULL },
+/* Set 21: MacBook Pro 6,2 */
+ { "TB0T", "TB1T", "TB2T", "TC0C", "TC0D", "TC0P", "TC1C", "TG0D",
+ "TG0P", "TG0T", "TMCD", "TP0P", "TPCD", "Th1H", "Th2H", "Tm0P",
+ "Ts0P", "Ts0S", NULL },
+/* Set 22: MacBook Pro 7,1 */
+ { "TB0T", "TB1T", "TB2T", "TC0D", "TC0P", "TN0D", "TN0P", "TN0S",
+ "TN1D", "TN1F", "TN1G", "TN1S", "Th1H", "Ts0P", "Ts0S", NULL },
};
/* List of keys used to read/write fan speeds */
@@ -646,6 +660,17 @@ out:
return snprintf(sysfsbuf, PAGE_SIZE, "(%d,%d)\n", left, right);
}
+/* Displays sensor key as label */
+static ssize_t applesmc_show_sensor_label(struct device *dev,
+ struct device_attribute *devattr, char *sysfsbuf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ const char *key =
+ temperature_sensors_sets[applesmc_temperature_set][attr->index];
+
+ return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", key);
+}
+
/* Displays degree Celsius * 1000 */
static ssize_t applesmc_show_temperature(struct device *dev,
struct device_attribute *devattr, char *sysfsbuf)
@@ -1113,6 +1138,86 @@ static const struct attribute_group fan_attribute_groups[] = {
/*
* Temperature sensors sysfs entries.
*/
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp7_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp8_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp9_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 8);
+static SENSOR_DEVICE_ATTR(temp10_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 9);
+static SENSOR_DEVICE_ATTR(temp11_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 10);
+static SENSOR_DEVICE_ATTR(temp12_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 11);
+static SENSOR_DEVICE_ATTR(temp13_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 12);
+static SENSOR_DEVICE_ATTR(temp14_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 13);
+static SENSOR_DEVICE_ATTR(temp15_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 14);
+static SENSOR_DEVICE_ATTR(temp16_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 15);
+static SENSOR_DEVICE_ATTR(temp17_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 16);
+static SENSOR_DEVICE_ATTR(temp18_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 17);
+static SENSOR_DEVICE_ATTR(temp19_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 18);
+static SENSOR_DEVICE_ATTR(temp20_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 19);
+static SENSOR_DEVICE_ATTR(temp21_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 20);
+static SENSOR_DEVICE_ATTR(temp22_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 21);
+static SENSOR_DEVICE_ATTR(temp23_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 22);
+static SENSOR_DEVICE_ATTR(temp24_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 23);
+static SENSOR_DEVICE_ATTR(temp25_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 24);
+static SENSOR_DEVICE_ATTR(temp26_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 25);
+static SENSOR_DEVICE_ATTR(temp27_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 26);
+static SENSOR_DEVICE_ATTR(temp28_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 27);
+static SENSOR_DEVICE_ATTR(temp29_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 28);
+static SENSOR_DEVICE_ATTR(temp30_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 29);
+static SENSOR_DEVICE_ATTR(temp31_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 30);
+static SENSOR_DEVICE_ATTR(temp32_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 31);
+static SENSOR_DEVICE_ATTR(temp33_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 32);
+static SENSOR_DEVICE_ATTR(temp34_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 33);
+static SENSOR_DEVICE_ATTR(temp35_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 34);
+static SENSOR_DEVICE_ATTR(temp36_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 35);
+static SENSOR_DEVICE_ATTR(temp37_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 36);
+static SENSOR_DEVICE_ATTR(temp38_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 37);
+static SENSOR_DEVICE_ATTR(temp39_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 38);
+static SENSOR_DEVICE_ATTR(temp40_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 39);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
applesmc_show_temperature, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO,
@@ -1194,6 +1299,50 @@ static SENSOR_DEVICE_ATTR(temp39_input, S_IRUGO,
static SENSOR_DEVICE_ATTR(temp40_input, S_IRUGO,
applesmc_show_temperature, NULL, 39);
+static struct attribute *label_attributes[] = {
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &sensor_dev_attr_temp2_label.dev_attr.attr,
+ &sensor_dev_attr_temp3_label.dev_attr.attr,
+ &sensor_dev_attr_temp4_label.dev_attr.attr,
+ &sensor_dev_attr_temp5_label.dev_attr.attr,
+ &sensor_dev_attr_temp6_label.dev_attr.attr,
+ &sensor_dev_attr_temp7_label.dev_attr.attr,
+ &sensor_dev_attr_temp8_label.dev_attr.attr,
+ &sensor_dev_attr_temp9_label.dev_attr.attr,
+ &sensor_dev_attr_temp10_label.dev_attr.attr,
+ &sensor_dev_attr_temp11_label.dev_attr.attr,
+ &sensor_dev_attr_temp12_label.dev_attr.attr,
+ &sensor_dev_attr_temp13_label.dev_attr.attr,
+ &sensor_dev_attr_temp14_label.dev_attr.attr,
+ &sensor_dev_attr_temp15_label.dev_attr.attr,
+ &sensor_dev_attr_temp16_label.dev_attr.attr,
+ &sensor_dev_attr_temp17_label.dev_attr.attr,
+ &sensor_dev_attr_temp18_label.dev_attr.attr,
+ &sensor_dev_attr_temp19_label.dev_attr.attr,
+ &sensor_dev_attr_temp20_label.dev_attr.attr,
+ &sensor_dev_attr_temp21_label.dev_attr.attr,
+ &sensor_dev_attr_temp22_label.dev_attr.attr,
+ &sensor_dev_attr_temp23_label.dev_attr.attr,
+ &sensor_dev_attr_temp24_label.dev_attr.attr,
+ &sensor_dev_attr_temp25_label.dev_attr.attr,
+ &sensor_dev_attr_temp26_label.dev_attr.attr,
+ &sensor_dev_attr_temp27_label.dev_attr.attr,
+ &sensor_dev_attr_temp28_label.dev_attr.attr,
+ &sensor_dev_attr_temp29_label.dev_attr.attr,
+ &sensor_dev_attr_temp30_label.dev_attr.attr,
+ &sensor_dev_attr_temp31_label.dev_attr.attr,
+ &sensor_dev_attr_temp32_label.dev_attr.attr,
+ &sensor_dev_attr_temp33_label.dev_attr.attr,
+ &sensor_dev_attr_temp34_label.dev_attr.attr,
+ &sensor_dev_attr_temp35_label.dev_attr.attr,
+ &sensor_dev_attr_temp36_label.dev_attr.attr,
+ &sensor_dev_attr_temp37_label.dev_attr.attr,
+ &sensor_dev_attr_temp38_label.dev_attr.attr,
+ &sensor_dev_attr_temp39_label.dev_attr.attr,
+ &sensor_dev_attr_temp40_label.dev_attr.attr,
+ NULL
+};
+
static struct attribute *temperature_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
@@ -1241,6 +1390,10 @@ static struct attribute *temperature_attributes[] = {
static const struct attribute_group temperature_attributes_group =
{ .attrs = temperature_attributes };
+static const struct attribute_group label_attributes_group = {
+ .attrs = label_attributes
+};
+
/* Module stuff */
/*
@@ -1363,6 +1516,14 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
{ .accelerometer = 0, .light = 0, .temperature_set = 17 },
/* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */
{ .accelerometer = 1, .light = 1, .temperature_set = 18 },
+/* MacBook Pro 5,3: accelerometer, backlight and temperature set 19 */
+ { .accelerometer = 1, .light = 1, .temperature_set = 19 },
+/* MacBook Pro 5,4: accelerometer, backlight and temperature set 20 */
+ { .accelerometer = 1, .light = 1, .temperature_set = 20 },
+/* MacBook Pro 6,2: accelerometer, backlight and temperature set 21 */
+ { .accelerometer = 1, .light = 1, .temperature_set = 21 },
+/* MacBook Pro 7,1: accelerometer, backlight and temperature set 22 */
+ { .accelerometer = 1, .light = 1, .temperature_set = 22 },
};
/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@@ -1376,6 +1537,22 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") },
&applesmc_dmi_data[7]},
+ { applesmc_dmi_match, "Apple MacBook Pro 7", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro7") },
+ &applesmc_dmi_data[22]},
+ { applesmc_dmi_match, "Apple MacBook Pro 5,4", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,4") },
+ &applesmc_dmi_data[20]},
+ { applesmc_dmi_match, "Apple MacBook Pro 5,3", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,3") },
+ &applesmc_dmi_data[19]},
+ { applesmc_dmi_match, "Apple MacBook Pro 6", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6") },
+ &applesmc_dmi_data[21]},
{ applesmc_dmi_match, "Apple MacBook Pro 5", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5") },
@@ -1518,7 +1695,8 @@ static int __init applesmc_init(void)
for (i = 0;
temperature_sensors_sets[applesmc_temperature_set][i] != NULL;
i++) {
- if (temperature_attributes[i] == NULL) {
+ if (temperature_attributes[i] == NULL ||
+ label_attributes[i] == NULL) {
printk(KERN_ERR "applesmc: More temperature sensors "
"in temperature_sensors_sets (at least %i)"
"than available sysfs files in "
@@ -1530,6 +1708,10 @@ static int __init applesmc_init(void)
temperature_attributes[i]);
if (ret)
goto out_temperature;
+ ret = sysfs_create_file(&pdev->dev.kobj,
+ label_attributes[i]);
+ if (ret)
+ goto out_temperature;
}
if (applesmc_accelerometer) {
@@ -1580,6 +1762,7 @@ out_accelerometer:
if (applesmc_accelerometer)
applesmc_release_accelerometer();
out_temperature:
+ sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group);
sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
out_fans:
while (fans_handled)
@@ -1609,6 +1792,7 @@ static void __exit applesmc_exit(void)
}
if (applesmc_accelerometer)
applesmc_release_accelerometer();
+ sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group);
sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
while (fans_handled)
sysfs_remove_group(&pdev->dev.kobj,
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 0f388adc6187..3b973f30b1f6 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -1141,7 +1141,6 @@ exit_remove:
&(asc7621_params[i].sda.dev_attr));
}
- i2c_set_clientdata(client, NULL);
kfree(data);
return err;
}
@@ -1196,7 +1195,6 @@ static int asc7621_remove(struct i2c_client *client)
&(asc7621_params[i].sda.dev_attr));
}
- i2c_set_clientdata(client, NULL);
kfree(data);
return 0;
}
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 16c420240724..653db1bda934 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -1411,6 +1411,13 @@ static int __init atk0110_init(void)
{
int ret;
+ /* Make sure it's safe to access the device through ACPI */
+ if (!acpi_resources_are_enforced()) {
+ pr_err("atk: Resources not safely usable due to "
+ "acpi_enforce_resources kernel parameter\n");
+ return -EBUSY;
+ }
+
ret = acpi_bus_register_driver(&atk_driver);
if (ret)
pr_info("atk: acpi_bus_register_driver failed: %d\n", ret);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index e9b7fbc5a447..2988da150ed6 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -241,6 +241,55 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
return tjmax;
}
+static int __devinit get_tjmax(struct cpuinfo_x86 *c, u32 id,
+ struct device *dev)
+{
+ /* 100 C is the default TjMax for both mobile and non-mobile CPUs */
+ int err;
+ u32 eax, edx;
+ u32 val;
+
+ /*
+ * On newer Intel(R) processors the IA32_TEMPERATURE_TARGET MSR
+ * contains the TjMax value.
+ */
+ err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
+ if (err) {
+ dev_warn(dev, "Unable to read TjMax from CPU.\n");
+ } else {
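+ /* Bits 23:16 of IA32_TEMPERATURE_TARGET hold TjMax in degrees C */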
+ val = (eax >> 16) & 0xff;
+ /*
+ * If the TjMax read from the MSR is not plausible, fall
+ * back to the model-specific assumption below.
+ */
+ if ((val > 80) && (val < 120)) {
+ dev_info(dev, "TjMax is %d C.\n", val);
+ return val * 1000;
+ }
+ }
+
+ /*
+ * For early CPUs, or when the MSR cannot be read, fall back to a
+ * per-model assumption. NOTE: the assumed value may not be correct.
+ */
+
+ switch (c->x86_model) {
+ case 0xe:
+ case 0xf:
+ case 0x16:
+ case 0x1a:
+ dev_warn(dev, "TjMax is assumed as 100 C!\n");
+ return 100000;
+ case 0x17:
+ case 0x1c: /* Atom CPUs */
+ return adjust_tjmax(c, id, dev);
+ default:
+ dev_warn(dev, "CPU (model=0x%x) is not supported yet,"
+ " using default TjMax of 100C.\n", c->x86_model);
+ return 100000;
+ }
+}
+
static int __devinit coretemp_probe(struct platform_device *pdev)
{
struct coretemp_data *data;
@@ -283,14 +332,18 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
}
}
- data->tjmax = adjust_tjmax(c, data->id, &pdev->dev);
+ data->tjmax = get_tjmax(c, data->id, &pdev->dev);
platform_set_drvdata(pdev, data);
- /* read the still undocumented IA32_TEMPERATURE_TARGET it exists
- on older CPUs but not in this register, Atoms don't have it either */
+ /*
+ * read the still undocumented IA32_TEMPERATURE_TARGET. It exists
+ * on older CPUs but not in this register,
+ * Atoms don't have it either.
+ */
if ((c->x86_model > 0xe) && (c->x86_model != 0x1c)) {
- err = rdmsr_safe_on_cpu(data->id, 0x1a2, &eax, &edx);
+ err = rdmsr_safe_on_cpu(data->id, MSR_IA32_TEMPERATURE_TARGET,
+ &eax, &edx);
if (err) {
dev_warn(&pdev->dev, "Unable to read"
" IA32_TEMPERATURE_TARGET MSR\n");
@@ -451,28 +504,20 @@ static int __init coretemp_init(void)
for_each_online_cpu(i) {
struct cpuinfo_x86 *c = &cpu_data(i);
+ /*
+ * CPUID.06H.EAX[0] indicates whether the CPU has thermal
+ * sensors. We check this bit only, all the early CPUs
+ * without thermal sensors will be filtered out.
+ */
+ if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01)) {
+ err = coretemp_device_add(i);
+ if (err)
+ goto exit_devices_unreg;
- /* check if family 6, models 0xe (Pentium M DC),
- 0xf (Core 2 DC 65nm), 0x16 (Core 2 SC 65nm),
- 0x17 (Penryn 45nm), 0x1a (Nehalem), 0x1c (Atom),
- 0x1e (Lynnfield) */
- if ((c->cpuid_level < 0) || (c->x86 != 0x6) ||
- !((c->x86_model == 0xe) || (c->x86_model == 0xf) ||
- (c->x86_model == 0x16) || (c->x86_model == 0x17) ||
- (c->x86_model == 0x1a) || (c->x86_model == 0x1c) ||
- (c->x86_model == 0x1e))) {
-
- /* supported CPU not found, but report the unknown
- family 6 CPU */
- if ((c->x86 == 0x6) && (c->x86_model > 0xf))
- printk(KERN_WARNING DRVNAME ": Unknown CPU "
- "model 0x%x\n", c->x86_model);
- continue;
+ } else {
+ printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
+ " has no thermal sensor.\n", c->x86_model);
}
-
- err = coretemp_device_add(i);
- if (err)
- goto exit_devices_unreg;
}
if (list_empty(&pdev_list)) {
err = -ENODEV;
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 823dd28a902c..980c17d5eeae 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -1,12 +1,14 @@
/*
- * dme1737.c - Driver for the SMSC DME1737, Asus A8000, SMSC SCH311x and
- * SCH5027 Super-I/O chips integrated hardware monitoring features.
- * Copyright (c) 2007, 2008 Juerg Haefliger <juergh@gmail.com>
+ * dme1737.c - Driver for the SMSC DME1737, Asus A8000, SMSC SCH311x, SCH5027,
+ * and SCH5127 Super-I/O chips integrated hardware monitoring
+ * features.
+ * Copyright (c) 2007, 2008, 2009, 2010 Juerg Haefliger <juergh@gmail.com>
*
* This driver is an I2C/ISA hybrid, meaning that it uses the I2C bus to access
* the chip registers if a DME1737, A8000, or SCH5027 is found and the ISA bus
- * if a SCH311x chip is found. Both types of chips have very similar hardware
- * monitoring capabilities but differ in the way they can be accessed.
+ * if a SCH311x or SCH5127 chip is found. Both types of chips have very
+ * similar hardware monitoring capabilities but differ in the way they can be
+ * accessed.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -57,7 +59,7 @@ MODULE_PARM_DESC(probe_all_addr, "Include probing of non-standard LPC "
/* Addresses to scan */
static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END};
-enum chips { dme1737, sch5027, sch311x };
+enum chips { dme1737, sch5027, sch311x, sch5127 };
/* ---------------------------------------------------------------------
* Registers
@@ -164,10 +166,29 @@ static const u8 DME1737_BIT_ALARM_FAN[] = {10, 11, 12, 13, 22, 23};
#define DME1737_VERSTEP_MASK 0xf8
#define SCH311X_DEVICE 0x8c
#define SCH5027_VERSTEP 0x69
+#define SCH5127_DEVICE 0x8e
+
+/* Device ID values (global configuration register index 0x20) */
+#define DME1737_ID_1 0x77
+#define DME1737_ID_2 0x78
+#define SCH3112_ID 0x7c
+#define SCH3114_ID 0x7d
+#define SCH3116_ID 0x7f
+#define SCH5027_ID 0x89
+#define SCH5127_ID 0x86
/* Length of ISA address segment */
#define DME1737_EXTENT 2
+/* chip-dependent features */
+#define HAS_TEMP_OFFSET (1 << 0) /* bit 0 */
+#define HAS_VID (1 << 1) /* bit 1 */
+#define HAS_ZONE3 (1 << 2) /* bit 2 */
+#define HAS_ZONE_HYST (1 << 3) /* bit 3 */
+#define HAS_PWM_MIN (1 << 4) /* bit 4 */
+#define HAS_FAN(ix) (1 << ((ix) + 5)) /* bits 5-10 */
+#define HAS_PWM(ix) (1 << ((ix) + 11)) /* bits 11-16 */
+
/* ---------------------------------------------------------------------
* Data structures and manipulation thereof
* --------------------------------------------------------------------- */
@@ -187,8 +208,7 @@ struct dme1737_data {
u8 vid;
u8 pwm_rr_en;
- u8 has_pwm;
- u8 has_fan;
+ u32 has_features;
/* Register values */
u16 in[7];
@@ -224,8 +244,11 @@ static const int IN_NOMINAL_SCH311x[] = {2500, 1500, 3300, 5000, 12000, 3300,
3300};
static const int IN_NOMINAL_SCH5027[] = {5000, 2250, 3300, 1125, 1125, 3300,
3300};
+static const int IN_NOMINAL_SCH5127[] = {2500, 2250, 3300, 1125, 1125, 3300,
+ 3300};
#define IN_NOMINAL(type) ((type) == sch311x ? IN_NOMINAL_SCH311x : \
(type) == sch5027 ? IN_NOMINAL_SCH5027 : \
+ (type) == sch5127 ? IN_NOMINAL_SCH5127 : \
IN_NOMINAL_DME1737)
/* Voltage input
@@ -568,7 +591,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
/* Sample register contents every 1 sec */
if (time_after(jiffies, data->last_update + HZ) || !data->valid) {
- if (data->type == dme1737) {
+ if (data->has_features & HAS_VID) {
data->vid = dme1737_read(data, DME1737_REG_VID) &
0x3f;
}
@@ -599,7 +622,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
DME1737_REG_TEMP_MIN(ix));
data->temp_max[ix] = dme1737_read(data,
DME1737_REG_TEMP_MAX(ix));
- if (data->type != sch5027) {
+ if (data->has_features & HAS_TEMP_OFFSET) {
data->temp_offset[ix] = dme1737_read(data,
DME1737_REG_TEMP_OFFSET(ix));
}
@@ -626,7 +649,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
for (ix = 0; ix < ARRAY_SIZE(data->fan); ix++) {
/* Skip reading registers if optional fans are not
* present */
- if (!(data->has_fan & (1 << ix))) {
+ if (!(data->has_features & HAS_FAN(ix))) {
continue;
}
data->fan[ix] = dme1737_read(data,
@@ -650,7 +673,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
for (ix = 0; ix < ARRAY_SIZE(data->pwm); ix++) {
/* Skip reading registers if optional PWMs are not
* present */
- if (!(data->has_pwm & (1 << ix))) {
+ if (!(data->has_features & HAS_PWM(ix))) {
continue;
}
data->pwm[ix] = dme1737_read(data,
@@ -672,12 +695,24 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
/* Thermal zone registers */
for (ix = 0; ix < ARRAY_SIZE(data->zone_low); ix++) {
- data->zone_low[ix] = dme1737_read(data,
- DME1737_REG_ZONE_LOW(ix));
- data->zone_abs[ix] = dme1737_read(data,
- DME1737_REG_ZONE_ABS(ix));
+ /* Skip reading registers if zone3 is not present */
+ if ((ix == 2) && !(data->has_features & HAS_ZONE3)) {
+ continue;
+ }
+ /* sch5127 zone2 registers are special */
+ if ((ix == 1) && (data->type == sch5127)) {
+ data->zone_low[1] = dme1737_read(data,
+ DME1737_REG_ZONE_LOW(2));
+ data->zone_abs[1] = dme1737_read(data,
+ DME1737_REG_ZONE_ABS(2));
+ } else {
+ data->zone_low[ix] = dme1737_read(data,
+ DME1737_REG_ZONE_LOW(ix));
+ data->zone_abs[ix] = dme1737_read(data,
+ DME1737_REG_ZONE_ABS(ix));
+ }
}
- if (data->type != sch5027) {
+ if (data->has_features & HAS_ZONE_HYST) {
for (ix = 0; ix < ARRAY_SIZE(data->zone_hyst); ix++) {
data->zone_hyst[ix] = dme1737_read(data,
DME1737_REG_ZONE_HYST(ix));
@@ -1594,10 +1629,6 @@ static struct attribute *dme1737_attr[] ={
&sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr,
&sensor_dev_attr_zone2_auto_channels_temp.dev_attr.attr,
- &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr,
- &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr,
- &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr,
- &sensor_dev_attr_zone3_auto_channels_temp.dev_attr.attr,
NULL
};
@@ -1605,27 +1636,23 @@ static const struct attribute_group dme1737_group = {
.attrs = dme1737_attr,
};
-/* The following struct holds misc attributes, which are not available in all
- * chips. Their creation depends on the chip type which is determined during
- * module load. */
-static struct attribute *dme1737_misc_attr[] = {
- /* Temperatures */
+/* The following struct holds temp offset attributes, which are not available
+ * in all chips. The following chips support them:
+ * DME1737, SCH311x */
+static struct attribute *dme1737_temp_offset_attr[] = {
&sensor_dev_attr_temp1_offset.dev_attr.attr,
&sensor_dev_attr_temp2_offset.dev_attr.attr,
&sensor_dev_attr_temp3_offset.dev_attr.attr,
- /* Zones */
- &sensor_dev_attr_zone1_auto_point1_temp_hyst.dev_attr.attr,
- &sensor_dev_attr_zone2_auto_point1_temp_hyst.dev_attr.attr,
- &sensor_dev_attr_zone3_auto_point1_temp_hyst.dev_attr.attr,
NULL
};
-static const struct attribute_group dme1737_misc_group = {
- .attrs = dme1737_misc_attr,
+static const struct attribute_group dme1737_temp_offset_group = {
+ .attrs = dme1737_temp_offset_attr,
};
-/* The following struct holds VID-related attributes. Their creation
- depends on the chip type which is determined during module load. */
+/* The following struct holds VID related attributes, which are not available
+ * in all chips. The following chips support them:
+ * DME1737 */
static struct attribute *dme1737_vid_attr[] = {
&dev_attr_vrm.attr,
&dev_attr_cpu0_vid.attr,
@@ -1636,6 +1663,36 @@ static const struct attribute_group dme1737_vid_group = {
.attrs = dme1737_vid_attr,
};
+/* The following struct holds temp zone 3 related attributes, which are not
+ * available in all chips. The following chips support them:
+ * DME1737, SCH311x, SCH5027 */
+static struct attribute *dme1737_zone3_attr[] = {
+ &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_zone3_auto_channels_temp.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group dme1737_zone3_group = {
+ .attrs = dme1737_zone3_attr,
+};
+
+
+/* The following struct holds temp zone hysteresis related attributes, which
+ * are not available in all chips. The following chips support them:
+ * DME1737, SCH311x */
+static struct attribute *dme1737_zone_hyst_attr[] = {
+ &sensor_dev_attr_zone1_auto_point1_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_zone2_auto_point1_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_zone3_auto_point1_temp_hyst.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group dme1737_zone_hyst_group = {
+ .attrs = dme1737_zone_hyst_attr,
+};
+
/* The following structs hold the PWM attributes, some of which are optional.
* Their creation depends on the chip configuration which is determined during
* module load. */
@@ -1691,10 +1748,10 @@ static const struct attribute_group dme1737_pwm_group[] = {
{ .attrs = dme1737_pwm6_attr },
};
-/* The following struct holds misc PWM attributes, which are not available in
- * all chips. Their creation depends on the chip type which is determined
+/* The following struct holds auto PWM min attributes, which are not available
+ * in all chips. Their creation depends on the chip type which is determined
* during module load. */
-static struct attribute *dme1737_pwm_misc_attr[] = {
+static struct attribute *dme1737_auto_pwm_min_attr[] = {
&sensor_dev_attr_pwm1_auto_pwm_min.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_pwm_min.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_pwm_min.dev_attr.attr,
@@ -1764,14 +1821,25 @@ static struct attribute *dme1737_zone_chmod_attr[] = {
&sensor_dev_attr_zone2_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group dme1737_zone_chmod_group = {
+ .attrs = dme1737_zone_chmod_attr,
+};
+
+
+/* The permissions of the following zone 3 attributes are changed to read-
+ * writeable if the chip is *not* locked. Otherwise they stay read-only. */
+static struct attribute *dme1737_zone3_chmod_attr[] = {
&sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr,
NULL
};
-static const struct attribute_group dme1737_zone_chmod_group = {
- .attrs = dme1737_zone_chmod_attr,
+static const struct attribute_group dme1737_zone3_chmod_group = {
+ .attrs = dme1737_zone3_chmod_attr,
};
/* The permissions of the following PWM attributes are changed to read-
@@ -1887,30 +1955,35 @@ static void dme1737_remove_files(struct device *dev)
int ix;
for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
- if (data->has_fan & (1 << ix)) {
+ if (data->has_features & HAS_FAN(ix)) {
sysfs_remove_group(&dev->kobj,
&dme1737_fan_group[ix]);
}
}
for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
- if (data->has_pwm & (1 << ix)) {
+ if (data->has_features & HAS_PWM(ix)) {
sysfs_remove_group(&dev->kobj,
&dme1737_pwm_group[ix]);
- if (data->type != sch5027 && ix < 3) {
+ if ((data->has_features & HAS_PWM_MIN) && ix < 3) {
sysfs_remove_file(&dev->kobj,
- dme1737_pwm_misc_attr[ix]);
+ dme1737_auto_pwm_min_attr[ix]);
}
}
}
- if (data->type != sch5027) {
- sysfs_remove_group(&dev->kobj, &dme1737_misc_group);
+ if (data->has_features & HAS_TEMP_OFFSET) {
+ sysfs_remove_group(&dev->kobj, &dme1737_temp_offset_group);
}
- if (data->type == dme1737) {
+ if (data->has_features & HAS_VID) {
sysfs_remove_group(&dev->kobj, &dme1737_vid_group);
}
-
+ if (data->has_features & HAS_ZONE3) {
+ sysfs_remove_group(&dev->kobj, &dme1737_zone3_group);
+ }
+ if (data->has_features & HAS_ZONE_HYST) {
+ sysfs_remove_group(&dev->kobj, &dme1737_zone_hyst_group);
+ }
sysfs_remove_group(&dev->kobj, &dme1737_group);
if (!data->client) {
@@ -1934,23 +2007,31 @@ static int dme1737_create_files(struct device *dev)
goto exit_remove;
}
- /* Create misc sysfs attributes */
- if ((data->type != sch5027) &&
+ /* Create chip-dependent sysfs attributes */
+ if ((data->has_features & HAS_TEMP_OFFSET) &&
(err = sysfs_create_group(&dev->kobj,
- &dme1737_misc_group))) {
+ &dme1737_temp_offset_group))) {
goto exit_remove;
}
-
- /* Create VID-related sysfs attributes */
- if ((data->type == dme1737) &&
+ if ((data->has_features & HAS_VID) &&
(err = sysfs_create_group(&dev->kobj,
&dme1737_vid_group))) {
goto exit_remove;
}
+ if ((data->has_features & HAS_ZONE3) &&
+ (err = sysfs_create_group(&dev->kobj,
+ &dme1737_zone3_group))) {
+ goto exit_remove;
+ }
+ if ((data->has_features & HAS_ZONE_HYST) &&
+ (err = sysfs_create_group(&dev->kobj,
+ &dme1737_zone_hyst_group))) {
+ goto exit_remove;
+ }
/* Create fan sysfs attributes */
for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
- if (data->has_fan & (1 << ix)) {
+ if (data->has_features & HAS_FAN(ix)) {
if ((err = sysfs_create_group(&dev->kobj,
&dme1737_fan_group[ix]))) {
goto exit_remove;
@@ -1960,14 +2041,14 @@ static int dme1737_create_files(struct device *dev)
/* Create PWM sysfs attributes */
for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
- if (data->has_pwm & (1 << ix)) {
+ if (data->has_features & HAS_PWM(ix)) {
if ((err = sysfs_create_group(&dev->kobj,
&dme1737_pwm_group[ix]))) {
goto exit_remove;
}
- if (data->type != sch5027 && ix < 3 &&
+ if ((data->has_features & HAS_PWM_MIN) && ix < 3 &&
(err = sysfs_create_file(&dev->kobj,
- dme1737_pwm_misc_attr[ix]))) {
+ dme1737_auto_pwm_min_attr[ix]))) {
goto exit_remove;
}
}
@@ -1983,21 +2064,30 @@ static int dme1737_create_files(struct device *dev)
dme1737_chmod_group(dev, &dme1737_zone_chmod_group,
S_IRUGO | S_IWUSR);
- /* Change permissions of misc sysfs attributes */
- if (data->type != sch5027) {
- dme1737_chmod_group(dev, &dme1737_misc_group,
+ /* Change permissions of chip-dependent sysfs attributes */
+ if (data->has_features & HAS_TEMP_OFFSET) {
+ dme1737_chmod_group(dev, &dme1737_temp_offset_group,
+ S_IRUGO | S_IWUSR);
+ }
+ if (data->has_features & HAS_ZONE3) {
+ dme1737_chmod_group(dev, &dme1737_zone3_chmod_group,
+ S_IRUGO | S_IWUSR);
+ }
+ if (data->has_features & HAS_ZONE_HYST) {
+ dme1737_chmod_group(dev, &dme1737_zone_hyst_group,
S_IRUGO | S_IWUSR);
}
/* Change permissions of PWM sysfs attributes */
for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_chmod_group); ix++) {
- if (data->has_pwm & (1 << ix)) {
+ if (data->has_features & HAS_PWM(ix)) {
dme1737_chmod_group(dev,
&dme1737_pwm_chmod_group[ix],
S_IRUGO | S_IWUSR);
- if (data->type != sch5027 && ix < 3) {
+ if ((data->has_features & HAS_PWM_MIN) &&
+ ix < 3) {
dme1737_chmod_file(dev,
- dme1737_pwm_misc_attr[ix],
+ dme1737_auto_pwm_min_attr[ix],
S_IRUGO | S_IWUSR);
}
}
@@ -2005,7 +2095,7 @@ static int dme1737_create_files(struct device *dev)
/* Change permissions of pwm[1-3] if in manual mode */
for (ix = 0; ix < 3; ix++) {
- if ((data->has_pwm & (1 << ix)) &&
+ if ((data->has_features & HAS_PWM(ix)) &&
(PWM_EN_FROM_REG(data->pwm_config[ix]) == 1)) {
dme1737_chmod_file(dev,
dme1737_pwm_chmod_attr[ix],
@@ -2052,20 +2142,20 @@ static int dme1737_init_device(struct device *dev)
return -EFAULT;
}
- /* Determine which optional fan and pwm features are enabled/present */
+ /* Determine which optional fan and pwm features are enabled (only
+ * valid for I2C devices) */
if (client) { /* I2C chip */
data->config2 = dme1737_read(data, DME1737_REG_CONFIG2);
/* Check if optional fan3 input is enabled */
if (data->config2 & 0x04) {
- data->has_fan |= (1 << 2);
+ data->has_features |= HAS_FAN(2);
}
/* Fan4 and pwm3 are only available if the client's I2C address
* is the default 0x2e. Otherwise the I/Os associated with
* these functions are used for addr enable/select. */
if (client->addr == 0x2e) {
- data->has_fan |= (1 << 3);
- data->has_pwm |= (1 << 2);
+ data->has_features |= HAS_FAN(3) | HAS_PWM(2);
}
/* Determine which of the optional fan[5-6] and pwm[5-6]
@@ -2077,26 +2167,40 @@ static int dme1737_init_device(struct device *dev)
dev_warn(dev, "Failed to query Super-IO for optional "
"features.\n");
}
- } else { /* ISA chip */
- /* Fan3 and pwm3 are always available. Fan[4-5] and pwm[5-6]
- * don't exist in the ISA chip. */
- data->has_fan |= (1 << 2);
- data->has_pwm |= (1 << 2);
}
- /* Fan1, fan2, pwm1, and pwm2 are always present */
- data->has_fan |= 0x03;
- data->has_pwm |= 0x03;
+ /* Fan[1-2] and pwm[1-2] are present in all chips */
+ data->has_features |= HAS_FAN(0) | HAS_FAN(1) | HAS_PWM(0) | HAS_PWM(1);
+
+ /* Chip-dependent features */
+ switch (data->type) {
+ case dme1737:
+ data->has_features |= HAS_TEMP_OFFSET | HAS_VID | HAS_ZONE3 |
+ HAS_ZONE_HYST | HAS_PWM_MIN;
+ break;
+ case sch311x:
+ data->has_features |= HAS_TEMP_OFFSET | HAS_ZONE3 |
+ HAS_ZONE_HYST | HAS_PWM_MIN | HAS_FAN(2) | HAS_PWM(2);
+ break;
+ case sch5027:
+ data->has_features |= HAS_ZONE3;
+ break;
+ case sch5127:
+ data->has_features |= HAS_FAN(2) | HAS_PWM(2);
+ break;
+ default:
+ break;
+ }
dev_info(dev, "Optional features: pwm3=%s, pwm5=%s, pwm6=%s, "
"fan3=%s, fan4=%s, fan5=%s, fan6=%s.\n",
- (data->has_pwm & (1 << 2)) ? "yes" : "no",
- (data->has_pwm & (1 << 4)) ? "yes" : "no",
- (data->has_pwm & (1 << 5)) ? "yes" : "no",
- (data->has_fan & (1 << 2)) ? "yes" : "no",
- (data->has_fan & (1 << 3)) ? "yes" : "no",
- (data->has_fan & (1 << 4)) ? "yes" : "no",
- (data->has_fan & (1 << 5)) ? "yes" : "no");
+ (data->has_features & HAS_PWM(2)) ? "yes" : "no",
+ (data->has_features & HAS_PWM(4)) ? "yes" : "no",
+ (data->has_features & HAS_PWM(5)) ? "yes" : "no",
+ (data->has_features & HAS_FAN(2)) ? "yes" : "no",
+ (data->has_features & HAS_FAN(3)) ? "yes" : "no",
+ (data->has_features & HAS_FAN(4)) ? "yes" : "no",
+ (data->has_features & HAS_FAN(5)) ? "yes" : "no");
reg = dme1737_read(data, DME1737_REG_TACH_PWM);
/* Inform if fan-to-pwm mapping differs from the default */
@@ -2122,7 +2226,7 @@ static int dme1737_init_device(struct device *dev)
for (ix = 0; ix < 3; ix++) {
data->pwm_config[ix] = dme1737_read(data,
DME1737_REG_PWM_CONFIG(ix));
- if ((data->has_pwm & (1 << ix)) &&
+ if ((data->has_features & HAS_PWM(ix)) &&
(PWM_EN_FROM_REG(data->pwm_config[ix]) == -1)) {
dev_info(dev, "Switching pwm%d to "
"manual mode.\n", ix + 1);
@@ -2142,7 +2246,7 @@ static int dme1737_init_device(struct device *dev)
data->pwm_acz[2] = 4; /* pwm3 -> zone3 */
/* Set VRM */
- if (data->type == dme1737) {
+ if (data->has_features & HAS_VID) {
data->vrm = vid_which_vrm();
}
@@ -2163,10 +2267,10 @@ static int dme1737_i2c_get_features(int sio_cip, struct dme1737_data *data)
dme1737_sio_enter(sio_cip);
/* Check device ID
- * The DME1737 can return either 0x78 or 0x77 as its device ID.
- * The SCH5027 returns 0x89 as its device ID. */
+ * The DME1737 has two known device IDs; the SCH5027 has one. */
reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20);
- if (!(reg == 0x77 || reg == 0x78 || reg == 0x89)) {
+ if (!(reg == DME1737_ID_1 || reg == DME1737_ID_2 ||
+ reg == SCH5027_ID)) {
err = -ENODEV;
goto exit;
}
@@ -2185,16 +2289,16 @@ static int dme1737_i2c_get_features(int sio_cip, struct dme1737_data *data)
* are enabled and available. Bits [3:2] of registers 0x43-0x46 are set
* to '10' if the respective feature is enabled. */
if ((inb(addr + 0x43) & 0x0c) == 0x08) { /* fan6 */
- data->has_fan |= (1 << 5);
+ data->has_features |= HAS_FAN(5);
}
if ((inb(addr + 0x44) & 0x0c) == 0x08) { /* pwm6 */
- data->has_pwm |= (1 << 5);
+ data->has_features |= HAS_PWM(5);
}
if ((inb(addr + 0x45) & 0x0c) == 0x08) { /* fan5 */
- data->has_fan |= (1 << 4);
+ data->has_features |= HAS_FAN(4);
}
if ((inb(addr + 0x46) & 0x0c) == 0x08) { /* pwm5 */
- data->has_pwm |= (1 << 4);
+ data->has_features |= HAS_PWM(4);
}
exit:
@@ -2222,7 +2326,6 @@ static int dme1737_i2c_detect(struct i2c_client *client,
if (company == DME1737_COMPANY_SMSC &&
verstep == SCH5027_VERSTEP) {
name = "sch5027";
-
} else if (company == DME1737_COMPANY_SMSC &&
(verstep & DME1737_VERSTEP_MASK) == DME1737_VERSTEP) {
name = "dme1737";
@@ -2329,10 +2432,10 @@ static int __init dme1737_isa_detect(int sio_cip, unsigned short *addr)
dme1737_sio_enter(sio_cip);
/* Check device ID
- * We currently know about SCH3112 (0x7c), SCH3114 (0x7d), and
- * SCH3116 (0x7f). */
+ * We currently know about SCH3112, SCH3114, SCH3116, and SCH5127 */
reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20);
- if (!(reg == 0x7c || reg == 0x7d || reg == 0x7f)) {
+ if (!(reg == SCH3112_ID || reg == SCH3114_ID || reg == SCH3116_ID ||
+ reg == SCH5127_ID)) {
err = -ENODEV;
goto exit;
}
@@ -2424,23 +2527,42 @@ static int __devinit dme1737_isa_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
/* Skip chip detection if module is loaded with force_id parameter */
- if (!force_id) {
+ switch (force_id) {
+ case SCH3112_ID:
+ case SCH3114_ID:
+ case SCH3116_ID:
+ data->type = sch311x;
+ break;
+ case SCH5127_ID:
+ data->type = sch5127;
+ break;
+ default:
company = dme1737_read(data, DME1737_REG_COMPANY);
device = dme1737_read(data, DME1737_REG_DEVICE);
- if (!((company == DME1737_COMPANY_SMSC) &&
- (device == SCH311X_DEVICE))) {
+ if ((company == DME1737_COMPANY_SMSC) &&
+ (device == SCH311X_DEVICE)) {
+ data->type = sch311x;
+ } else if ((company == DME1737_COMPANY_SMSC) &&
+ (device == SCH5127_DEVICE)) {
+ data->type = sch5127;
+ } else {
err = -ENODEV;
goto exit_kfree;
}
}
- data->type = sch311x;
- /* Fill in the remaining client fields and initialize the mutex */
- data->name = "sch311x";
+ if (data->type == sch5127) {
+ data->name = "sch5127";
+ } else {
+ data->name = "sch311x";
+ }
+
+ /* Initialize the mutex */
mutex_init(&data->update_lock);
- dev_info(dev, "Found a SCH311x chip at 0x%04x\n", data->addr);
+ dev_info(dev, "Found a %s chip at 0x%04x\n",
+ data->type == sch5127 ? "SCH5127" : "SCH311x", data->addr);
/* Initialize the chip */
if ((err = dme1737_init_device(dev))) {
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
new file mode 100644
index 000000000000..0e4b5642638d
--- /dev/null
+++ b/drivers/hwmon/emc1403.c
@@ -0,0 +1,344 @@
+/*
+ * emc1403.c - SMSC Thermal Driver
+ *
+ * Copyright (C) 2008 Intel Corp
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * TODO
+ * - cache alarm and critical limit registers
+ * - add emc1404 support
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/mutex.h>
+
+#define THERMAL_PID_REG 0xfd
+#define THERMAL_SMSC_ID_REG 0xfe
+#define THERMAL_REVISION_REG 0xff
+
+struct thermal_data {
+ struct device *hwmon_dev;
+ struct mutex mutex;
+ /* Cache the hyst value so we don't keep re-reading it. In theory
+ we could cache it forever as nobody else should be writing it. */
+ u8 cached_hyst;
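+ /* jiffies value after which cached_hyst must be re-read from the chip */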
+ unsigned long hyst_valid;
+};
+
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ int retval = i2c_smbus_read_byte_data(client, sda->index);
+
+ if (retval < 0)
+ return retval;
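+ /* Registers hold whole degrees C; sysfs reports millidegrees */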
+ return sprintf(buf, "%d000\n", retval);
+}
+
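+/* Status bit attributes: 'nr' selects the status register, 'index' the bit mask */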
+static ssize_t show_bit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr);
+ int retval = i2c_smbus_read_byte_data(client, sda->nr);
+
+ if (retval < 0)
+ return retval;
+ retval &= sda->index;
+ return sprintf(buf, "%d\n", retval ? 1 : 0);
+}
+
+static ssize_t store_temp(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned long val;
+ int retval;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+ retval = i2c_smbus_write_byte_data(client, sda->index,
+ DIV_ROUND_CLOSEST(val, 1000));
+ if (retval < 0)
+ return retval;
+ return count;
+}
+
+static ssize_t show_hyst(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct thermal_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ int retval;
+ int hyst;
+
+ retval = i2c_smbus_read_byte_data(client, sda->index);
+ if (retval < 0)
+ return retval;
+
+ if (time_after(jiffies, data->hyst_valid)) {
+ hyst = i2c_smbus_read_byte_data(client, 0x21);
+ if (hyst < 0)
+ return hyst;
+ data->cached_hyst = hyst;
+ data->hyst_valid = jiffies + HZ;
+ }
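+ /* Report the limit minus the chip-wide hysteresis, in millidegrees C */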
+ return sprintf(buf, "%d000\n", retval - data->cached_hyst);
+}
+
+static ssize_t store_hyst(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct thermal_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ int retval;
+ int hyst;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+ retval = i2c_smbus_read_byte_data(client, sda->index);
+ if (retval < 0)
+ goto fail;
+
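+ /* The chip stores hysteresis as a positive offset below the limit */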
+ hyst = retval * 1000 - val;
+ hyst = DIV_ROUND_CLOSEST(hyst, 1000);
+ if (hyst < 0 || hyst > 255) {
+ retval = -ERANGE;
+ goto fail;
+ }
+
+ retval = i2c_smbus_write_byte_data(client, 0x21, hyst);
+ if (retval == 0) {
+ retval = count;
+ data->cached_hyst = hyst;
+ data->hyst_valid = jiffies + HZ;
+ }
+fail:
+ mutex_unlock(&data->mutex);
+ return retval;
+}
+
+/*
+ * Sensors. We pass the actual i2c register to the methods.
+ */
+
+static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x06);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x05);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x20);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0x00);
+static SENSOR_DEVICE_ATTR_2(temp1_min_alarm, S_IRUGO,
+ show_bit, NULL, 0x36, 0x01);
+static SENSOR_DEVICE_ATTR_2(temp1_max_alarm, S_IRUGO,
+ show_bit, NULL, 0x35, 0x01);
+static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO,
+ show_bit, NULL, 0x37, 0x01);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR,
+ show_hyst, store_hyst, 0x20);
+
+static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x08);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x07);
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x19);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 0x01);
+static SENSOR_DEVICE_ATTR_2(temp2_min_alarm, S_IRUGO,
+ show_bit, NULL, 0x36, 0x02);
+static SENSOR_DEVICE_ATTR_2(temp2_max_alarm, S_IRUGO,
+ show_bit, NULL, 0x35, 0x02);
+static SENSOR_DEVICE_ATTR_2(temp2_crit_alarm, S_IRUGO,
+ show_bit, NULL, 0x37, 0x02);
+static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO | S_IWUSR,
+ show_hyst, store_hyst, 0x19);
+
+static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x16);
+static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x15);
+static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x1A);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 0x23);
+static SENSOR_DEVICE_ATTR_2(temp3_min_alarm, S_IRUGO,
+ show_bit, NULL, 0x36, 0x04);
+static SENSOR_DEVICE_ATTR_2(temp3_max_alarm, S_IRUGO,
+ show_bit, NULL, 0x35, 0x04);
+static SENSOR_DEVICE_ATTR_2(temp3_crit_alarm, S_IRUGO,
+ show_bit, NULL, 0x37, 0x04);
+static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO | S_IWUSR,
+ show_hyst, store_hyst, 0x1A);
+
+static struct attribute *mid_att_thermal[] = {
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp2_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp3_min.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group m_thermal_gr = {
+ .attrs = mid_att_thermal
+};
+
+static int emc1403_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ int id;
+ /* Check if thermal chip is SMSC and EMC1403 */
+
+ id = i2c_smbus_read_byte_data(client, THERMAL_SMSC_ID_REG);
+ if (id != 0x5d)
+ return -ENODEV;
+
+ /* Note: 0x25 is the 1404 which is very similar and this
+ driver could be extended */
+ id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG);
+ if (id != 0x21)
+ return -ENODEV;
+
+ id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
+ if (id != 0x01)
+ return -ENODEV;
+
+ strlcpy(info->type, "emc1403", I2C_NAME_SIZE);
+ return 0;
+}
+
+static int emc1403_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int res;
+ struct thermal_data *data;
+
+ data = kzalloc(sizeof(struct thermal_data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_warn(&client->dev, "out of memory");
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->mutex);
+ data->hyst_valid = jiffies - 1; /* Expired */
+
+ res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
+ if (res) {
+ dev_warn(&client->dev, "create group failed\n");
+ goto thermal_error1;
+ }
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ res = PTR_ERR(data->hwmon_dev);
+ dev_warn(&client->dev, "register hwmon dev failed\n");
+ goto thermal_error2;
+ }
+ dev_info(&client->dev, "EMC1403 Thermal chip found\n");
+ return res;
+
+thermal_error2:
+ sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
+thermal_error1:
+ kfree(data);
+ return res;
+}
+
+static int emc1403_remove(struct i2c_client *client)
+{
+ struct thermal_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
+ kfree(data);
+ return 0;
+}
+
+static const unsigned short emc1403_address_list[] = {
+ 0x18, 0x2a, 0x4c, 0x4d, I2C_CLIENT_END
+};
+
+static const struct i2c_device_id emc1403_idtable[] = {
+ { "emc1403", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, emc1403_idtable);
+
+static struct i2c_driver sensor_emc1403 = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "emc1403",
+ },
+ .detect = emc1403_detect,
+ .probe = emc1403_probe,
+ .remove = emc1403_remove,
+ .id_table = emc1403_idtable,
+ .address_list = emc1403_address_list,
+};
+
+static int __init sensor_emc1403_init(void)
+{
+ return i2c_add_driver(&sensor_emc1403);
+}
+
+static void __exit sensor_emc1403_exit(void)
+{
+ i2c_del_driver(&sensor_emc1403);
+}
+
+module_init(sensor_emc1403_init);
+module_exit(sensor_emc1403_exit);
+
+MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com>");
+MODULE_DESCRIPTION("emc1403 Thermal Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index a95fa4256caa..537841ef44b9 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -856,21 +856,19 @@ static inline int superio_inb(int base, int reg)
static int superio_inw(int base, int reg)
{
int val;
- outb(reg++, base);
- val = inb(base + 1) << 8;
- outb(reg, base);
- val |= inb(base + 1);
+ val = superio_inb(base, reg) << 8;
+ val |= superio_inb(base, reg + 1);
return val;
}
static inline void superio_enter(int base)
{
/* according to the datasheet the key must be send twice! */
- outb( SIO_UNLOCK_KEY, base);
- outb( SIO_UNLOCK_KEY, base);
+ outb(SIO_UNLOCK_KEY, base);
+ outb(SIO_UNLOCK_KEY, base);
}
-static inline void superio_select( int base, int ld)
+static inline void superio_select(int base, int ld)
{
outb(SIO_REG_LDSEL, base);
outb(ld, base + 1);
@@ -905,10 +903,8 @@ static u16 f71882fg_read16(struct f71882fg_data *data, u8 reg)
{
u16 val;
- outb(reg++, data->addr + ADDR_REG_OFFSET);
- val = inb(data->addr + DATA_REG_OFFSET) << 8;
- outb(reg, data->addr + ADDR_REG_OFFSET);
- val |= inb(data->addr + DATA_REG_OFFSET);
+ val = f71882fg_read8(data, reg) << 8;
+ val |= f71882fg_read8(data, reg + 1);
return val;
}
@@ -921,10 +917,8 @@ static void f71882fg_write8(struct f71882fg_data *data, u8 reg, u8 val)
static void f71882fg_write16(struct f71882fg_data *data, u8 reg, u16 val)
{
- outb(reg++, data->addr + ADDR_REG_OFFSET);
- outb(val >> 8, data->addr + DATA_REG_OFFSET);
- outb(reg, data->addr + ADDR_REG_OFFSET);
- outb(val & 255, data->addr + DATA_REG_OFFSET);
+ f71882fg_write8(data, reg, val >> 8);
+ f71882fg_write8(data, reg + 1, val & 0xff);
}
static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr)
@@ -945,7 +939,7 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
mutex_lock(&data->update_lock);
/* Update once every 60 seconds */
- if ( time_after(jiffies, data->last_limits + 60 * HZ ) ||
+ if (time_after(jiffies, data->last_limits + 60 * HZ) ||
!data->valid) {
if (data->type == f71882fg || data->type == f71889fg) {
data->in1_max =
@@ -1127,8 +1121,12 @@ static ssize_t store_fan_full_speed(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
val = SENSORS_LIMIT(val, 23, 1500000);
val = fan_to_reg(val);
@@ -1157,8 +1155,12 @@ static ssize_t store_fan_beep(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP);
@@ -1206,7 +1208,14 @@ static ssize_t store_in_max(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- long val = simple_strtol(buf, NULL, 10) / 8;
+ int err;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 8;
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1233,8 +1242,12 @@ static ssize_t store_in_beep(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP);
@@ -1299,8 +1312,14 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10) / 1000;
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1333,10 +1352,16 @@ static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10) / 1000;
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
ssize_t ret = count;
u8 reg;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
mutex_lock(&data->update_lock);
@@ -1372,8 +1397,14 @@ static ssize_t store_temp_crit(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10) / 1000;
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1427,8 +1458,12 @@ static ssize_t store_temp_beep(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP);
@@ -1490,8 +1525,13 @@ static ssize_t store_pwm(struct device *dev,
size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1551,8 +1591,12 @@ static ssize_t store_pwm_enable(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
/* Special case for F8000 pwm channel 3 which only does auto mode */
if (data->type == f8000 && nr == 2 && val != 2)
@@ -1626,9 +1670,14 @@ static ssize_t store_pwm_auto_point_pwm(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int pwm = to_sensor_dev_attr_2(devattr)->index;
+ int err, pwm = to_sensor_dev_attr_2(devattr)->index;
int point = to_sensor_dev_attr_2(devattr)->nr;
- long val = simple_strtol(buf, NULL, 10);
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1674,10 +1723,16 @@ static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
int point = to_sensor_dev_attr_2(devattr)->nr;
- long val = simple_strtol(buf, NULL, 10) / 1000;
u8 reg;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
mutex_lock(&data->update_lock);
data->pwm_auto_point_temp[nr][point] =
@@ -1716,8 +1771,12 @@ static ssize_t store_pwm_interpolate(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->pwm_auto_point_mapping[nr] =
@@ -1752,8 +1811,12 @@ static ssize_t store_pwm_auto_point_channel(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
switch (val) {
case 1:
@@ -1798,9 +1861,15 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int pwm = to_sensor_dev_attr_2(devattr)->index;
+ int err, pwm = to_sensor_dev_attr_2(devattr)->index;
int point = to_sensor_dev_attr_2(devattr)->nr;
- long val = simple_strtol(buf, NULL, 10) / 1000;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
if (data->type == f71889fg)
val = SENSORS_LIMIT(val, -128, 127);
@@ -2109,6 +2178,13 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
int err = -ENODEV;
u16 devid;
+ /* Don't step on other drivers' I/O space by accident */
+ if (!request_region(sioaddr, 2, DRVNAME)) {
+ printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
+ (int)sioaddr);
+ return -EBUSY;
+ }
+
superio_enter(sioaddr);
devid = superio_inw(sioaddr, SIO_REG_MANID);
@@ -2151,8 +2227,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
}
*address = superio_inw(sioaddr, SIO_REG_ADDR);
- if (*address == 0)
- {
+ if (*address == 0) {
printk(KERN_WARNING DRVNAME ": Base address not set\n");
goto exit;
}
@@ -2164,6 +2239,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
(int)superio_inb(sioaddr, SIO_REG_DEVREV));
exit:
superio_exit(sioaddr);
+ release_region(sioaddr, 2);
return err;
}
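The f71882fg hunks above all apply the same conversion: the unchecked simple_strtol()/simple_strtoul() calls become strict_strtol()/strict_strtoul(), so a sysfs write containing trailing garbage or an out-of-range number now returns an error instead of being silently misparsed. A minimal user-space analogue of that parse-and-reject step, using plain strtol() (parse_long_strict is an illustrative name, not part of the patch):

/* Illustrative only: strict_strtol()-style parsing built on strtol(). */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Return 0 and store the value on success, -EINVAL/-ERANGE otherwise. */
static int parse_long_strict(const char *buf, int base, long *val)
{
	char *end;
	long res;

	errno = 0;
	res = strtol(buf, &end, base);
	if (end == buf)
		return -EINVAL;		/* no digits at all */
	if (*end == '\n')
		end++;			/* allow the trailing newline sysfs passes in */
	if (*end != '\0')
		return -EINVAL;		/* trailing garbage */
	if (errno == ERANGE)
		return -ERANGE;		/* overflow/underflow */
	*val = res;
	return 0;
}

int main(void)
{
	long val = 0;
	int err = parse_long_strict("1500\n", 10, &val);

	printf("\"1500\"  -> err=%d val=%ld\n", err, val);
	printf("\"12abc\" -> err=%d\n", parse_long_strict("12abc", 10, &val));
	return 0;
}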
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index bad2cf3ef4a4..0f58ecc5334d 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -662,7 +662,6 @@ exit_remove:
sysfs_remove_group(&client->dev.kobj, &f75375_group);
exit_free:
kfree(data);
- i2c_set_clientdata(client, NULL);
return err;
}
@@ -672,7 +671,6 @@ static int f75375_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &f75375_group);
kfree(data);
- i2c_set_clientdata(client, NULL);
return 0;
}
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 0627f7a5b9b8..b7ca2a9676cf 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -38,6 +38,7 @@
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/smp_lock.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
@@ -847,8 +848,7 @@ static ssize_t watchdog_write(struct file *filp, const char __user *buf,
return count;
}
-static int watchdog_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
static struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
@@ -858,6 +858,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
int i, ret = 0;
struct fschmd_data *data = filp->private_data;
+ lock_kernel();
switch (cmd) {
case WDIOC_GETSUPPORT:
ident.firmware_version = data->revision;
@@ -914,7 +915,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
default:
ret = -ENOTTY;
}
-
+ unlock_kernel();
return ret;
}
@@ -924,7 +925,7 @@ static const struct file_operations watchdog_fops = {
.open = watchdog_open,
.release = watchdog_release,
.write = watchdog_write,
- .ioctl = watchdog_ioctl,
+ .unlocked_ioctl = watchdog_ioctl,
};
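The fschmd change above is the usual BKL-pushdown conversion: the watchdog ioctl moves from the VFS-locked .ioctl hook to .unlocked_ioctl, taking and releasing the big kernel lock explicitly so the locking behaviour stays unchanged. The same conversion is repeated for w83793 further down. A schematic, era-specific sketch of the shape (hypothetical example_* names; assumes a kernel of this vintage that still exports lock_kernel()/unlock_kernel()):

/* Schematic .ioctl -> .unlocked_ioctl conversion, as done above. */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/smp_lock.h>

static long example_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	long ret;

	lock_kernel();		/* keep the old implicit BKL serialisation */
	switch (cmd) {
	default:
		ret = -ENOTTY;
	}
	unlock_kernel();
	return ret;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_ioctl,	/* was: .ioctl = example_ioctl */
};

static struct miscdevice example_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "example_wdt",
	.fops	= &example_fops,
};

static int __init example_init(void)
{
	return misc_register(&example_misc);
}
module_init(example_init);

static void __exit example_exit(void)
{
	misc_deregister(&example_misc);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");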
diff --git a/drivers/hwmon/g760a.c b/drivers/hwmon/g760a.c
index 09ea12e0a551..1f63d1a3af5e 100644
--- a/drivers/hwmon/g760a.c
+++ b/drivers/hwmon/g760a.c
@@ -236,7 +236,6 @@ error_hwmon_device_register:
sysfs_remove_group(&client->dev.kobj, &g760a_group);
error_sysfs_create_group:
kfree(data);
- i2c_set_clientdata(client, NULL);
return err;
}
@@ -247,7 +246,6 @@ static int g760a_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &g760a_group);
kfree(data);
- i2c_set_clientdata(client, NULL);
return 0;
}
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index b2f2277cad3c..6138f036b159 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -41,6 +41,8 @@
/* joystick device poll interval in milliseconds */
#define MDPS_POLL_INTERVAL 50
+#define MDPS_POLL_MIN 0
+#define MDPS_POLL_MAX 2000
/*
* The sensor can also generate interrupts (DRDY) but it's pretty pointless
* because they are generated even if the data do not change. So it's better
@@ -121,11 +123,9 @@ static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
int position[3];
int i;
- mutex_lock(&lis3->mutex);
position[0] = lis3->read_data(lis3, OUTX);
position[1] = lis3->read_data(lis3, OUTY);
position[2] = lis3->read_data(lis3, OUTZ);
- mutex_unlock(&lis3->mutex);
for (i = 0; i < 3; i++)
position[i] = (position[i] * lis3->scale) / LIS3_ACCURACY;
@@ -249,8 +249,24 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
+static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
+{
+ int x, y, z;
+
+ mutex_lock(&lis3_dev.mutex);
+ lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
+ input_report_abs(pidev->input, ABS_X, x);
+ input_report_abs(pidev->input, ABS_Y, y);
+ input_report_abs(pidev->input, ABS_Z, z);
+ input_sync(pidev->input);
+ mutex_unlock(&lis3_dev.mutex);
+}
+
static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
{
+ if (!test_bit(0, &lis3_dev.misc_opened))
+ goto out;
+
/*
* Be careful: on some HP laptops the bios force DD when on battery and
* the lid is closed. This leads to interrupts as soon as a little move
@@ -260,44 +276,93 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
wake_up_interruptible(&lis3_dev.misc_wait);
kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
+out:
+ if (lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
+ lis3_dev.idev->input->users)
+ return IRQ_WAKE_THREAD;
return IRQ_HANDLED;
}
-static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
+static void lis302dl_interrupt_handle_click(struct lis3lv02d *lis3)
{
- int ret;
+ struct input_dev *dev = lis3->idev->input;
+ u8 click_src;
- if (test_and_set_bit(0, &lis3_dev.misc_opened))
- return -EBUSY; /* already open */
+ mutex_lock(&lis3->mutex);
+ lis3->read(lis3, CLICK_SRC, &click_src);
- atomic_set(&lis3_dev.count, 0);
+ if (click_src & CLICK_SINGLE_X) {
+ input_report_key(dev, lis3->mapped_btns[0], 1);
+ input_report_key(dev, lis3->mapped_btns[0], 0);
+ }
- /*
- * The sensor can generate interrupts for free-fall and direction
- * detection (distinguishable with FF_WU_SRC and DD_SRC) but to keep
- * the things simple and _fast_ we activate it only for free-fall, so
- * no need to read register (very slow with ACPI). For the same reason,
- * we forbid shared interrupts.
- *
- * IRQF_TRIGGER_RISING seems pointless on HP laptops because the
- * io-apic is not configurable (and generates a warning) but I keep it
- * in case of support for other hardware.
- */
- ret = request_irq(lis3_dev.irq, lis302dl_interrupt, IRQF_TRIGGER_RISING,
- DRIVER_NAME, &lis3_dev);
+ if (click_src & CLICK_SINGLE_Y) {
+ input_report_key(dev, lis3->mapped_btns[1], 1);
+ input_report_key(dev, lis3->mapped_btns[1], 0);
+ }
- if (ret) {
- clear_bit(0, &lis3_dev.misc_opened);
- printk(KERN_ERR DRIVER_NAME ": IRQ%d allocation failed\n", lis3_dev.irq);
- return -EBUSY;
+ if (click_src & CLICK_SINGLE_Z) {
+ input_report_key(dev, lis3->mapped_btns[2], 1);
+ input_report_key(dev, lis3->mapped_btns[2], 0);
}
+ input_sync(dev);
+ mutex_unlock(&lis3->mutex);
+}
+
+static void lis302dl_interrupt_handle_ff_wu(struct lis3lv02d *lis3)
+{
+ u8 wu1_src;
+ u8 wu2_src;
+
+ lis3->read(lis3, FF_WU_SRC_1, &wu1_src);
+ lis3->read(lis3, FF_WU_SRC_2, &wu2_src);
+
+ wu1_src = wu1_src & FF_WU_SRC_IA ? wu1_src : 0;
+ wu2_src = wu2_src & FF_WU_SRC_IA ? wu2_src : 0;
+
+ /* joystick poll is internally protected by the lis3->mutex. */
+ if (wu1_src || wu2_src)
+ lis3lv02d_joystick_poll(lis3_dev.idev);
+}
+
+static irqreturn_t lis302dl_interrupt_thread1_8b(int irq, void *data)
+{
+
+ struct lis3lv02d *lis3 = data;
+
+ if ((lis3->pdata->irq_cfg & LIS3_IRQ1_MASK) == LIS3_IRQ1_CLICK)
+ lis302dl_interrupt_handle_click(lis3);
+ else
+ lis302dl_interrupt_handle_ff_wu(lis3);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lis302dl_interrupt_thread2_8b(int irq, void *data)
+{
+
+ struct lis3lv02d *lis3 = data;
+
+ if ((lis3->pdata->irq_cfg & LIS3_IRQ2_MASK) == LIS3_IRQ2_CLICK)
+ lis302dl_interrupt_handle_click(lis3);
+ else
+ lis302dl_interrupt_handle_ff_wu(lis3);
+
+ return IRQ_HANDLED;
+}
+
+static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(0, &lis3_dev.misc_opened))
+ return -EBUSY; /* already open */
+
+ atomic_set(&lis3_dev.count, 0);
return 0;
}
static int lis3lv02d_misc_release(struct inode *inode, struct file *file)
{
fasync_helper(-1, file, 0, &lis3_dev.async_queue);
- free_irq(lis3_dev.irq, &lis3_dev);
clear_bit(0, &lis3_dev.misc_opened); /* release the device */
return 0;
}
@@ -380,22 +445,12 @@ static struct miscdevice lis3lv02d_misc_device = {
.fops = &lis3lv02d_misc_fops,
};
-static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
-{
- int x, y, z;
-
- lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
- input_report_abs(pidev->input, ABS_X, x);
- input_report_abs(pidev->input, ABS_Y, y);
- input_report_abs(pidev->input, ABS_Z, z);
- input_sync(pidev->input);
-}
-
int lis3lv02d_joystick_enable(void)
{
struct input_dev *input_dev;
int err;
int max_val, fuzz, flat;
+ int btns[] = {BTN_X, BTN_Y, BTN_Z};
if (lis3_dev.idev)
return -EINVAL;
@@ -406,6 +461,8 @@ int lis3lv02d_joystick_enable(void)
lis3_dev.idev->poll = lis3lv02d_joystick_poll;
lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL;
+ lis3_dev.idev->poll_interval_min = MDPS_POLL_MIN;
+ lis3_dev.idev->poll_interval_max = MDPS_POLL_MAX;
input_dev = lis3_dev.idev->input;
input_dev->name = "ST LIS3LV02DL Accelerometer";
@@ -422,6 +479,10 @@ int lis3lv02d_joystick_enable(void)
input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
+ lis3_dev.mapped_btns[0] = lis3lv02d_get_axis(abs(lis3_dev.ac.x), btns);
+ lis3_dev.mapped_btns[1] = lis3lv02d_get_axis(abs(lis3_dev.ac.y), btns);
+ lis3_dev.mapped_btns[2] = lis3lv02d_get_axis(abs(lis3_dev.ac.z), btns);
+
err = input_register_polled_device(lis3_dev.idev);
if (err) {
input_free_polled_device(lis3_dev.idev);
@@ -434,6 +495,11 @@ EXPORT_SYMBOL_GPL(lis3lv02d_joystick_enable);
void lis3lv02d_joystick_disable(void)
{
+ if (lis3_dev.irq)
+ free_irq(lis3_dev.irq, &lis3_dev);
+ if (lis3_dev.pdata && lis3_dev.pdata->irq2)
+ free_irq(lis3_dev.pdata->irq2, &lis3_dev);
+
if (!lis3_dev.idev)
return;
@@ -462,7 +528,9 @@ static ssize_t lis3lv02d_position_show(struct device *dev,
{
int x, y, z;
+ mutex_lock(&lis3_dev.mutex);
lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
+ mutex_unlock(&lis3_dev.mutex);
return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
}
@@ -521,12 +589,70 @@ int lis3lv02d_remove_fs(struct lis3lv02d *lis3)
}
EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
+static void lis3lv02d_8b_configure(struct lis3lv02d *dev,
+ struct lis3lv02d_platform_data *p)
+{
+ int err;
+ int ctrl2 = p->hipass_ctrl;
+
+ if (p->click_flags) {
+ dev->write(dev, CLICK_CFG, p->click_flags);
+ dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
+ dev->write(dev, CLICK_LATENCY, p->click_latency);
+ dev->write(dev, CLICK_WINDOW, p->click_window);
+ dev->write(dev, CLICK_THSZ, p->click_thresh_z & 0xf);
+ dev->write(dev, CLICK_THSY_X,
+ (p->click_thresh_x & 0xf) |
+ (p->click_thresh_y << 4));
+
+ if (dev->idev) {
+ struct input_dev *input_dev = lis3_dev.idev->input;
+ input_set_capability(input_dev, EV_KEY, BTN_X);
+ input_set_capability(input_dev, EV_KEY, BTN_Y);
+ input_set_capability(input_dev, EV_KEY, BTN_Z);
+ }
+ }
+
+ if (p->wakeup_flags) {
+ dev->write(dev, FF_WU_CFG_1, p->wakeup_flags);
+ dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
+ /* default to 2.5ms for now */
+ dev->write(dev, FF_WU_DURATION_1, 1);
+ ctrl2 ^= HP_FF_WU1; /* Xor to keep compatible with old pdata*/
+ }
+
+ if (p->wakeup_flags2) {
+ dev->write(dev, FF_WU_CFG_2, p->wakeup_flags2);
+ dev->write(dev, FF_WU_THS_2, p->wakeup_thresh2 & 0x7f);
+ /* default to 2.5ms for now */
+ dev->write(dev, FF_WU_DURATION_2, 1);
+ ctrl2 ^= HP_FF_WU2; /* Xor to keep compatible with old pdata*/
+ }
+ /* Configure hipass filters */
+ dev->write(dev, CTRL_REG2, ctrl2);
+
+ if (p->irq2) {
+ err = request_threaded_irq(p->irq2,
+ NULL,
+ lis302dl_interrupt_thread2_8b,
+ IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT,
+ DRIVER_NAME, &lis3_dev);
+ if (err < 0)
+ printk(KERN_ERR DRIVER_NAME
+ "No second IRQ. Limited functionality\n");
+ }
+}
+
/*
* Initialise the accelerometer and the various subsystems.
* Should be rather independent of the bus system.
*/
int lis3lv02d_init_device(struct lis3lv02d *dev)
{
+ int err;
+ irq_handler_t thread_fn;
+
dev->whoami = lis3lv02d_read_8(dev, WHO_AM_I);
switch (dev->whoami) {
@@ -567,25 +693,8 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
if (dev->pdata) {
struct lis3lv02d_platform_data *p = dev->pdata;
- if (p->click_flags && (dev->whoami == WAI_8B)) {
- dev->write(dev, CLICK_CFG, p->click_flags);
- dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
- dev->write(dev, CLICK_LATENCY, p->click_latency);
- dev->write(dev, CLICK_WINDOW, p->click_window);
- dev->write(dev, CLICK_THSZ, p->click_thresh_z & 0xf);
- dev->write(dev, CLICK_THSY_X,
- (p->click_thresh_x & 0xf) |
- (p->click_thresh_y << 4));
- }
-
- if (p->wakeup_flags && (dev->whoami == WAI_8B)) {
- dev->write(dev, FF_WU_CFG_1, p->wakeup_flags);
- dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
- /* default to 2.5ms for now */
- dev->write(dev, FF_WU_DURATION_1, 1);
- /* enable high pass filter for both free-fall units */
- dev->write(dev, CTRL_REG2, HP_FF_WU1 | HP_FF_WU2);
- }
+ if (dev->whoami == WAI_8B)
+ lis3lv02d_8b_configure(dev, p);
if (p->irq_cfg)
dev->write(dev, CTRL_REG3, p->irq_cfg);
@@ -598,6 +707,32 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
goto out;
}
+ /*
+ * The sensor can generate interrupts for free-fall and direction
+ * detection (distinguishable with FF_WU_SRC and DD_SRC) but to keep
+ * the things simple and _fast_ we activate it only for free-fall, so
+ * no need to read register (very slow with ACPI). For the same reason,
+ * we forbid shared interrupts.
+ *
+ * IRQF_TRIGGER_RISING seems pointless on HP laptops because the
+ * io-apic is not configurable (and generates a warning) but I keep it
+ * in case of support for other hardware.
+ */
+ if (dev->whoami == WAI_8B)
+ thread_fn = lis302dl_interrupt_thread1_8b;
+ else
+ thread_fn = NULL;
+
+ err = request_threaded_irq(dev->irq, lis302dl_interrupt,
+ thread_fn,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ DRIVER_NAME, &lis3_dev);
+
+ if (err < 0) {
+ printk(KERN_ERR DRIVER_NAME "Cannot get IRQ\n");
+ goto out;
+ }
+
if (misc_register(&lis3lv02d_misc_device))
printk(KERN_ERR DRIVER_NAME ": misc_register failed\n");
out:
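The lis3lv02d rework above stops requesting the interrupt on misc-device open and instead registers it once with request_threaded_irq(): the hard handler only wakes poll()/fasync waiters and, on 8-bit parts with an open joystick device, returns IRQ_WAKE_THREAD so the slow click/free-fall register reads run in a threaded handler that is allowed to sleep. A stripped-down skeleton of that split (hypothetical example_* names, not the driver code):

/* Minimal request_threaded_irq() split, mirroring the pattern above. */
#include <linux/interrupt.h>

static irqreturn_t example_hardirq(int irq, void *data)
{
	/*
	 * Fast, non-sleeping path. The real driver checks here whether
	 * anyone is actually listening and returns IRQ_HANDLED if not;
	 * otherwise it defers the slow bus I/O to the thread.
	 */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread(int irq, void *data)
{
	/* Threaded handler: may sleep, so mutexes and I2C reads are fine. */
	return IRQ_HANDLED;
}

static int example_setup_irq(unsigned int irq, void *dev)
{
	/* IRQF_ONESHOT keeps the line masked until the thread has finished. */
	return request_threaded_irq(irq, example_hardirq, example_thread,
				    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				    "example", dev);
}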
diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
index e6a01f44709b..854091380e33 100644
--- a/drivers/hwmon/lis3lv02d.h
+++ b/drivers/hwmon/lis3lv02d.h
@@ -196,6 +196,16 @@ enum lis3lv02d_dd_src {
DD_SRC_IA = 0x40,
};
+enum lis3lv02d_click_src_8b {
+ CLICK_SINGLE_X = 0x01,
+ CLICK_DOUBLE_X = 0x02,
+ CLICK_SINGLE_Y = 0x04,
+ CLICK_DOUBLE_Y = 0x08,
+ CLICK_SINGLE_Z = 0x10,
+ CLICK_DOUBLE_Z = 0x20,
+ CLICK_IA = 0x40,
+};
+
struct axis_conversion {
s8 x;
s8 y;
@@ -223,6 +233,7 @@ struct lis3lv02d {
struct platform_device *pdev; /* platform device */
atomic_t count; /* interrupt count after last read */
struct axis_conversion ac; /* hw -> logical axis */
+ int mapped_btns[3];
u32 irq; /* IRQ number */
struct fasync_struct *async_queue; /* queue for the misc device */
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index bf81aff7051d..776aeb3019d2 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -53,7 +53,7 @@
* Address is fully defined internally and cannot be changed.
*/
-static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END };
+static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
/*
* The LM63 registers
@@ -131,12 +131,15 @@ static struct lm63_data *lm63_update_device(struct device *dev);
static int lm63_detect(struct i2c_client *client, struct i2c_board_info *info);
static void lm63_init_client(struct i2c_client *client);
+enum chips { lm63, lm64 };
+
/*
* Driver data (common to all clients)
*/
static const struct i2c_device_id lm63_id[] = {
- { "lm63", 0 },
+ { "lm63", lm63 },
+ { "lm64", lm64 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm63_id);
@@ -422,6 +425,7 @@ static int lm63_detect(struct i2c_client *new_client,
struct i2c_adapter *adapter = new_client->adapter;
u8 man_id, chip_id, reg_config1, reg_config2;
u8 reg_alert_status, reg_alert_mask;
+ int address = new_client->addr;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
@@ -439,7 +443,6 @@ static int lm63_detect(struct i2c_client *new_client,
LM63_REG_ALERT_MASK);
if (man_id != 0x01 /* National Semiconductor */
- || chip_id != 0x41 /* LM63 */
|| (reg_config1 & 0x18) != 0x00
|| (reg_config2 & 0xF8) != 0x00
|| (reg_alert_status & 0x20) != 0x00
@@ -450,7 +453,12 @@ static int lm63_detect(struct i2c_client *new_client,
return -ENODEV;
}
- strlcpy(info->type, "lm63", I2C_NAME_SIZE);
+ if (chip_id == 0x41 && address == 0x4c)
+ strlcpy(info->type, "lm63", I2C_NAME_SIZE);
+ else if (chip_id == 0x51 && (address == 0x18 || address == 0x4e))
+ strlcpy(info->type, "lm64", I2C_NAME_SIZE);
+ else
+ return -ENODEV;
return 0;
}
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 4d1b76bc8148..29b9030d42c3 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -136,7 +136,6 @@ static int lm73_remove(struct i2c_client *client)
hwmon_device_unregister(hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm73_group);
- i2c_set_clientdata(client, NULL);
return 0;
}
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 8ae2cfe2d827..393f354f92a4 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -46,6 +46,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
tcn75,
tmp100,
tmp101,
+ tmp105,
tmp175,
tmp275,
tmp75,
@@ -191,7 +192,6 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
exit_remove:
sysfs_remove_group(&client->dev.kobj, &lm75_group);
exit_free:
- i2c_set_clientdata(client, NULL);
kfree(data);
return status;
}
@@ -203,7 +203,6 @@ static int lm75_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm75_group);
lm75_write_value(client, LM75_REG_CONF, data->orig_conf);
- i2c_set_clientdata(client, NULL);
kfree(data);
return 0;
}
@@ -220,6 +219,7 @@ static const struct i2c_device_id lm75_ids[] = {
{ "tcn75", tcn75, },
{ "tmp100", tmp100, },
{ "tmp101", tmp101, },
+ { "tmp105", tmp105, },
{ "tmp175", tmp175, },
{ "tmp275", tmp275, },
{ "tmp75", tmp75, },
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 7cc2708871ab..760ef72eea56 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -982,7 +982,8 @@ static struct lm90_data *lm90_update_device(struct device *dev)
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) {
+ if (time_after(jiffies, data->last_updated + HZ / 2 + HZ / 10)
+ || !data->valid) {
u8 h, l;
dev_dbg(&client->dev, "Updating lm90 data.\n");
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 8fc8eb8cba47..94741d42112d 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -399,7 +399,6 @@ static int lm95241_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm95241_group);
- i2c_set_clientdata(client, NULL);
kfree(data);
return 0;
}
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index 65c232a9d0c5..21d201befc2c 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -45,9 +45,7 @@ enum ltc4245_cmd {
LTC4245_VEEIN = 0x19,
LTC4245_VEESENSE = 0x1a,
LTC4245_VEEOUT = 0x1b,
- LTC4245_GPIOADC1 = 0x1c,
- LTC4245_GPIOADC2 = 0x1d,
- LTC4245_GPIOADC3 = 0x1e,
+ LTC4245_GPIOADC = 0x1c,
};
struct ltc4245_data {
@@ -61,7 +59,7 @@ struct ltc4245_data {
u8 cregs[0x08];
/* Voltage registers */
- u8 vregs[0x0f];
+ u8 vregs[0x0d];
};
static struct ltc4245_data *ltc4245_update_device(struct device *dev)
@@ -86,7 +84,7 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev)
data->cregs[i] = val;
}
- /* Read voltage registers -- 0x10 to 0x1f */
+ /* Read voltage registers -- 0x10 to 0x1c */
for (i = 0; i < ARRAY_SIZE(data->vregs); i++) {
val = i2c_smbus_read_byte_data(client, i+0x10);
if (unlikely(val < 0))
@@ -128,9 +126,7 @@ static int ltc4245_get_voltage(struct device *dev, u8 reg)
case LTC4245_VEEOUT:
voltage = regval * -55;
break;
- case LTC4245_GPIOADC1:
- case LTC4245_GPIOADC2:
- case LTC4245_GPIOADC3:
+ case LTC4245_GPIOADC:
voltage = regval * 10;
break;
default:
@@ -297,9 +293,7 @@ LTC4245_ALARM(in7_min_alarm, (1 << 2), LTC4245_FAULT2);
LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2);
/* GPIO voltages */
-LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC1);
-LTC4245_VOLTAGE(in10_input, LTC4245_GPIOADC2);
-LTC4245_VOLTAGE(in11_input, LTC4245_GPIOADC3);
+LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC);
/* Power Consumption (virtual) */
LTC4245_POWER(power1_input, LTC4245_12VSENSE);
@@ -342,8 +336,6 @@ static struct attribute *ltc4245_attributes[] = {
&sensor_dev_attr_in8_min_alarm.dev_attr.attr,
&sensor_dev_attr_in9_input.dev_attr.attr,
- &sensor_dev_attr_in10_input.dev_attr.attr,
- &sensor_dev_attr_in11_input.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
&sensor_dev_attr_power2_input.dev_attr.attr,
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
new file mode 100644
index 000000000000..93187c3cb5e7
--- /dev/null
+++ b/drivers/hwmon/tmp102.c
@@ -0,0 +1,319 @@
+/* Texas Instruments TMP102 SMBus temperature sensor driver
+ *
+ * Copyright (C) 2010 Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+
+#define DRIVER_NAME "tmp102"
+
+#define TMP102_TEMP_REG 0x00
+#define TMP102_CONF_REG 0x01
+/* note: these bit definitions are byte swapped */
+#define TMP102_CONF_SD 0x0100
+#define TMP102_CONF_TM 0x0200
+#define TMP102_CONF_POL 0x0400
+#define TMP102_CONF_F0 0x0800
+#define TMP102_CONF_F1 0x1000
+#define TMP102_CONF_R0 0x2000
+#define TMP102_CONF_R1 0x4000
+#define TMP102_CONF_OS 0x8000
+#define TMP102_CONF_EM 0x0010
+#define TMP102_CONF_AL 0x0020
+#define TMP102_CONF_CR0 0x0040
+#define TMP102_CONF_CR1 0x0080
+#define TMP102_TLOW_REG 0x02
+#define TMP102_THIGH_REG 0x03
+
+struct tmp102 {
+ struct device *hwmon_dev;
+ struct mutex lock;
+ u16 config_orig;
+ unsigned long last_update;
+ int temp[3];
+};
+
+/* SMBus specifies low byte first, but the TMP102 returns high byte first,
+ * so we have to swab16 the values */
+static inline int tmp102_read_reg(struct i2c_client *client, u8 reg)
+{
+ int result = i2c_smbus_read_word_data(client, reg);
+ return result < 0 ? result : swab16(result);
+}
+
+static inline int tmp102_write_reg(struct i2c_client *client, u8 reg, u16 val)
+{
+ return i2c_smbus_write_word_data(client, reg, swab16(val));
+}
+
+/* convert left adjusted 13-bit TMP102 register value to milliCelsius */
+static inline int tmp102_reg_to_mC(s16 val)
+{
+ return ((val & ~0x01) * 1000) / 128;
+}
+
+/* convert milliCelsius to left adjusted 13-bit TMP102 register value */
+static inline u16 tmp102_mC_to_reg(int val)
+{
+ return (val * 128) / 1000;
+}
+
+static const u8 tmp102_reg[] = {
+ TMP102_TEMP_REG,
+ TMP102_TLOW_REG,
+ TMP102_THIGH_REG,
+};
+
+static struct tmp102 *tmp102_update_device(struct i2c_client *client)
+{
+ struct tmp102 *tmp102 = i2c_get_clientdata(client);
+
+ mutex_lock(&tmp102->lock);
+ if (time_after(jiffies, tmp102->last_update + HZ / 3)) {
+ int i;
+ for (i = 0; i < ARRAY_SIZE(tmp102->temp); ++i) {
+ int status = tmp102_read_reg(client, tmp102_reg[i]);
+ if (status > -1)
+ tmp102->temp[i] = tmp102_reg_to_mC(status);
+ }
+ tmp102->last_update = jiffies;
+ }
+ mutex_unlock(&tmp102->lock);
+ return tmp102;
+}
+
+static ssize_t tmp102_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ struct tmp102 *tmp102 = tmp102_update_device(to_i2c_client(dev));
+
+ return sprintf(buf, "%d\n", tmp102->temp[sda->index]);
+}
+
+static ssize_t tmp102_set_temp(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tmp102 *tmp102 = i2c_get_clientdata(client);
+ long val;
+ int status;
+
+ if (strict_strtol(buf, 10, &val) < 0)
+ return -EINVAL;
+ val = SENSORS_LIMIT(val, -256000, 255000);
+
+ mutex_lock(&tmp102->lock);
+ tmp102->temp[sda->index] = val;
+ status = tmp102_write_reg(client, tmp102_reg[sda->index],
+ tmp102_mC_to_reg(val));
+ mutex_unlock(&tmp102->lock);
+ return status ? : count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp102_show_temp, NULL , 0);
+
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, tmp102_show_temp,
+ tmp102_set_temp, 1);
+
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, tmp102_show_temp,
+ tmp102_set_temp, 2);
+
+static struct attribute *tmp102_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tmp102_attr_group = {
+ .attrs = tmp102_attributes,
+};
+
+#define TMP102_CONFIG (TMP102_CONF_TM | TMP102_CONF_EM | TMP102_CONF_CR1)
+#define TMP102_CONFIG_RD_ONLY (TMP102_CONF_R0 | TMP102_CONF_R1 | TMP102_CONF_AL)
+
+static int __devinit tmp102_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tmp102 *tmp102;
+ int status;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA)) {
+ dev_err(&client->dev, "adapter doesnt support SMBus word "
+ "transactions\n");
+ return -ENODEV;
+ }
+
+ tmp102 = kzalloc(sizeof(*tmp102), GFP_KERNEL);
+ if (!tmp102) {
+ dev_dbg(&client->dev, "kzalloc failed\n");
+ return -ENOMEM;
+ }
+ i2c_set_clientdata(client, tmp102);
+
+ status = tmp102_read_reg(client, TMP102_CONF_REG);
+ if (status < 0) {
+ dev_err(&client->dev, "error reading config register\n");
+ goto fail_free;
+ }
+ tmp102->config_orig = status;
+ status = tmp102_write_reg(client, TMP102_CONF_REG, TMP102_CONFIG);
+ if (status < 0) {
+ dev_err(&client->dev, "error writing config register\n");
+ goto fail_restore_config;
+ }
+ status = tmp102_read_reg(client, TMP102_CONF_REG);
+ if (status < 0) {
+ dev_err(&client->dev, "error reading config register\n");
+ goto fail_restore_config;
+ }
+ status &= ~TMP102_CONFIG_RD_ONLY;
+ if (status != TMP102_CONFIG) {
+ dev_err(&client->dev, "config settings did not stick\n");
+ status = -ENODEV;
+ goto fail_restore_config;
+ }
+ tmp102->last_update = jiffies - HZ;
+ mutex_init(&tmp102->lock);
+
+ status = sysfs_create_group(&client->dev.kobj, &tmp102_attr_group);
+ if (status) {
+ dev_dbg(&client->dev, "could not create sysfs files\n");
+ goto fail_restore_config;
+ }
+ tmp102->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(tmp102->hwmon_dev)) {
+ dev_dbg(&client->dev, "unable to register hwmon device\n");
+ status = PTR_ERR(tmp102->hwmon_dev);
+ goto fail_remove_sysfs;
+ }
+
+ dev_info(&client->dev, "initialized\n");
+
+ return 0;
+
+fail_remove_sysfs:
+ sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group);
+fail_restore_config:
+ tmp102_write_reg(client, TMP102_CONF_REG, tmp102->config_orig);
+fail_free:
+ kfree(tmp102);
+
+ return status;
+}
+
+static int __devexit tmp102_remove(struct i2c_client *client)
+{
+ struct tmp102 *tmp102 = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(tmp102->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group);
+
+ /* Stop monitoring if device was stopped originally */
+ if (tmp102->config_orig & TMP102_CONF_SD) {
+ int config;
+
+ config = tmp102_read_reg(client, TMP102_CONF_REG);
+ if (config >= 0)
+ tmp102_write_reg(client, TMP102_CONF_REG,
+ config | TMP102_CONF_SD);
+ }
+
+ kfree(tmp102);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tmp102_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int config;
+
+ config = tmp102_read_reg(client, TMP102_CONF_REG);
+ if (config < 0)
+ return config;
+
+ config |= TMP102_CONF_SD;
+ return tmp102_write_reg(client, TMP102_CONF_REG, config);
+}
+
+static int tmp102_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int config;
+
+ config = tmp102_read_reg(client, TMP102_CONF_REG);
+ if (config < 0)
+ return config;
+
+ config &= ~TMP102_CONF_SD;
+ return tmp102_write_reg(client, TMP102_CONF_REG, config);
+}
+
+static const struct dev_pm_ops tmp102_dev_pm_ops = {
+ .suspend = tmp102_suspend,
+ .resume = tmp102_resume,
+};
+
+#define TMP102_DEV_PM_OPS (&tmp102_dev_pm_ops)
+#else
+#define TMP102_DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static const struct i2c_device_id tmp102_id[] = {
+ { "tmp102", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tmp102_id);
+
+static struct i2c_driver tmp102_driver = {
+ .driver.name = DRIVER_NAME,
+ .driver.pm = TMP102_DEV_PM_OPS,
+ .probe = tmp102_probe,
+ .remove = __devexit_p(tmp102_remove),
+ .id_table = tmp102_id,
+};
+
+static int __init tmp102_init(void)
+{
+ return i2c_add_driver(&tmp102_driver);
+}
+module_init(tmp102_init);
+
+static void __exit tmp102_exit(void)
+{
+ i2c_del_driver(&tmp102_driver);
+}
+module_exit(tmp102_exit);
+
+MODULE_AUTHOR("Steven King <sfking@fdwdc.com>");
+MODULE_DESCRIPTION("Texas Instruments TMP102 temperature sensor driver");
+MODULE_LICENSE("GPL");
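In the new tmp102 driver, i2c_smbus_read_word_data() returns the low byte first while the sensor sends the high byte first, hence the swab16() in the register accessors, and with extended mode (TMP102_CONF_EM) enabled the temperature is a left-adjusted 13-bit value in 1/16 °C steps. A stand-alone check of the conversion arithmetic used by tmp102_reg_to_mC()/tmp102_mC_to_reg() (sample values chosen for illustration):

/* Stand-alone check of the tmp102 register <-> millidegree conversion. */
#include <stdint.h>
#include <stdio.h>

/* left-adjusted 13-bit register value (1/16 degC per LSB) -> milli-degC */
static int reg_to_mC(int16_t val)
{
	return ((val & ~0x01) * 1000) / 128;
}

/* milli-degC -> left-adjusted 13-bit register value */
static uint16_t mC_to_reg(int val)
{
	return (val * 128) / 1000;
}

int main(void)
{
	/* 25 degC = 400 * 1/16 degC, left-adjusted by 3 bits -> 0x0c80 */
	printf("0x0c80 -> %d mC\n", reg_to_mC(0x0c80));		/* 25000 */
	printf("-40000 mC -> 0x%04x\n", mC_to_reg(-40000));	/* 0xec00 */
	return 0;
}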
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index d14a1af9f550..ad8d535235c5 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -92,17 +92,6 @@ static const u8 TMP411_TEMP_HIGHEST_LSB[2] = { 0x33, 0x37 };
#define TMP411_DEVICE_ID 0x12
/*
- * Functions declarations
- */
-
-static int tmp401_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
-static int tmp401_detect(struct i2c_client *client,
- struct i2c_board_info *info);
-static int tmp401_remove(struct i2c_client *client);
-static struct tmp401_data *tmp401_update_device(struct device *dev);
-
-/*
* Driver data (common to all clients)
*/
@@ -113,18 +102,6 @@ static const struct i2c_device_id tmp401_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tmp401_id);
-static struct i2c_driver tmp401_driver = {
- .class = I2C_CLASS_HWMON,
- .driver = {
- .name = "tmp401",
- },
- .probe = tmp401_probe,
- .remove = tmp401_remove,
- .id_table = tmp401_id,
- .detect = tmp401_detect,
- .address_list = normal_i2c,
-};
-
/*
* Client data (each client gets its own)
*/
@@ -194,6 +171,71 @@ static u8 tmp401_crit_temp_to_register(long temp, u8 config)
return (temp + 500) / 1000;
}
+static struct tmp401_data *tmp401_update_device_reg16(
+ struct i2c_client *client, struct tmp401_data *data)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ /*
+ * High byte must be read first immediately followed
+ * by the low byte
+ */
+ data->temp[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_MSB[i]) << 8;
+ data->temp[i] |= i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_LSB[i]);
+ data->temp_low[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_LOW_LIMIT_MSB_READ[i]) << 8;
+ data->temp_low[i] |= i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_LOW_LIMIT_LSB[i]);
+ data->temp_high[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_HIGH_LIMIT_MSB_READ[i]) << 8;
+ data->temp_high[i] |= i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_HIGH_LIMIT_LSB[i]);
+ data->temp_crit[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_CRIT_LIMIT[i]);
+
+ if (data->kind == tmp411) {
+ data->temp_lowest[i] = i2c_smbus_read_byte_data(client,
+ TMP411_TEMP_LOWEST_MSB[i]) << 8;
+ data->temp_lowest[i] |= i2c_smbus_read_byte_data(
+ client, TMP411_TEMP_LOWEST_LSB[i]);
+
+ data->temp_highest[i] = i2c_smbus_read_byte_data(
+ client, TMP411_TEMP_HIGHEST_MSB[i]) << 8;
+ data->temp_highest[i] |= i2c_smbus_read_byte_data(
+ client, TMP411_TEMP_HIGHEST_LSB[i]);
+ }
+ }
+ return data;
+}
+
+static struct tmp401_data *tmp401_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tmp401_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ data->status = i2c_smbus_read_byte_data(client, TMP401_STATUS);
+ data->config = i2c_smbus_read_byte_data(client,
+ TMP401_CONFIG_READ);
+ tmp401_update_device_reg16(client, data);
+
+ data->temp_crit_hyst = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_CRIT_HYST);
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
static ssize_t show_temp_value(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@@ -420,30 +462,36 @@ static ssize_t reset_temp_history(struct device *dev,
}
static struct sensor_device_attribute tmp401_attr[] = {
- SENSOR_ATTR(temp1_input, 0444, show_temp_value, NULL, 0),
- SENSOR_ATTR(temp1_min, 0644, show_temp_min, store_temp_min, 0),
- SENSOR_ATTR(temp1_max, 0644, show_temp_max, store_temp_max, 0),
- SENSOR_ATTR(temp1_crit, 0644, show_temp_crit, store_temp_crit, 0),
- SENSOR_ATTR(temp1_crit_hyst, 0644, show_temp_crit_hyst,
+ SENSOR_ATTR(temp1_input, S_IRUGO, show_temp_value, NULL, 0),
+ SENSOR_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp_min,
+ store_temp_min, 0),
+ SENSOR_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
+ store_temp_max, 0),
+ SENSOR_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_crit,
+ store_temp_crit, 0),
+ SENSOR_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_crit_hyst,
store_temp_crit_hyst, 0),
- SENSOR_ATTR(temp1_min_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp1_min_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_LOCAL_LOW),
- SENSOR_ATTR(temp1_max_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp1_max_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_LOCAL_HIGH),
- SENSOR_ATTR(temp1_crit_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp1_crit_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_LOCAL_CRIT),
- SENSOR_ATTR(temp2_input, 0444, show_temp_value, NULL, 1),
- SENSOR_ATTR(temp2_min, 0644, show_temp_min, store_temp_min, 1),
- SENSOR_ATTR(temp2_max, 0644, show_temp_max, store_temp_max, 1),
- SENSOR_ATTR(temp2_crit, 0644, show_temp_crit, store_temp_crit, 1),
- SENSOR_ATTR(temp2_crit_hyst, 0444, show_temp_crit_hyst, NULL, 1),
- SENSOR_ATTR(temp2_fault, 0444, show_status, NULL,
+ SENSOR_ATTR(temp2_input, S_IRUGO, show_temp_value, NULL, 1),
+ SENSOR_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_min,
+ store_temp_min, 1),
+ SENSOR_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
+ store_temp_max, 1),
+ SENSOR_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit,
+ store_temp_crit, 1),
+ SENSOR_ATTR(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 1),
+ SENSOR_ATTR(temp2_fault, S_IRUGO, show_status, NULL,
TMP401_STATUS_REMOTE_OPEN),
- SENSOR_ATTR(temp2_min_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp2_min_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_REMOTE_LOW),
- SENSOR_ATTR(temp2_max_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp2_max_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_REMOTE_HIGH),
- SENSOR_ATTR(temp2_crit_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp2_crit_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_REMOTE_CRIT),
};
@@ -455,11 +503,11 @@ static struct sensor_device_attribute tmp401_attr[] = {
* and remote channels.
*/
static struct sensor_device_attribute tmp411_attr[] = {
- SENSOR_ATTR(temp1_highest, 0444, show_temp_highest, NULL, 0),
- SENSOR_ATTR(temp1_lowest, 0444, show_temp_lowest, NULL, 0),
- SENSOR_ATTR(temp2_highest, 0444, show_temp_highest, NULL, 1),
- SENSOR_ATTR(temp2_lowest, 0444, show_temp_lowest, NULL, 1),
- SENSOR_ATTR(temp_reset_history, 0200, NULL, reset_temp_history, 0),
+ SENSOR_ATTR(temp1_highest, S_IRUGO, show_temp_highest, NULL, 0),
+ SENSOR_ATTR(temp1_lowest, S_IRUGO, show_temp_lowest, NULL, 0),
+ SENSOR_ATTR(temp2_highest, S_IRUGO, show_temp_highest, NULL, 1),
+ SENSOR_ATTR(temp2_lowest, S_IRUGO, show_temp_lowest, NULL, 1),
+ SENSOR_ATTR(temp_reset_history, S_IWUSR, NULL, reset_temp_history, 0),
};
/*
@@ -529,6 +577,27 @@ static int tmp401_detect(struct i2c_client *client,
return 0;
}
+static int tmp401_remove(struct i2c_client *client)
+{
+ struct tmp401_data *data = i2c_get_clientdata(client);
+ int i;
+
+ if (data->hwmon_dev)
+ hwmon_device_unregister(data->hwmon_dev);
+
+ for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++)
+ device_remove_file(&client->dev, &tmp401_attr[i].dev_attr);
+
+ if (data->kind == tmp411) {
+ for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++)
+ device_remove_file(&client->dev,
+ &tmp411_attr[i].dev_attr);
+ }
+
+ kfree(data);
+ return 0;
+}
+
static int tmp401_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -581,91 +650,17 @@ exit_remove:
return err;
}
-static int tmp401_remove(struct i2c_client *client)
-{
- struct tmp401_data *data = i2c_get_clientdata(client);
- int i;
-
- if (data->hwmon_dev)
- hwmon_device_unregister(data->hwmon_dev);
-
- for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++)
- device_remove_file(&client->dev, &tmp401_attr[i].dev_attr);
-
- if (data->kind == tmp411) {
- for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++)
- device_remove_file(&client->dev,
- &tmp411_attr[i].dev_attr);
- }
-
- kfree(data);
- return 0;
-}
-
-static struct tmp401_data *tmp401_update_device_reg16(
- struct i2c_client *client, struct tmp401_data *data)
-{
- int i;
-
- for (i = 0; i < 2; i++) {
- /*
- * High byte must be read first immediately followed
- * by the low byte
- */
- data->temp[i] = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_MSB[i]) << 8;
- data->temp[i] |= i2c_smbus_read_byte_data(client,
- TMP401_TEMP_LSB[i]);
- data->temp_low[i] = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_LOW_LIMIT_MSB_READ[i]) << 8;
- data->temp_low[i] |= i2c_smbus_read_byte_data(client,
- TMP401_TEMP_LOW_LIMIT_LSB[i]);
- data->temp_high[i] = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_HIGH_LIMIT_MSB_READ[i]) << 8;
- data->temp_high[i] |= i2c_smbus_read_byte_data(client,
- TMP401_TEMP_HIGH_LIMIT_LSB[i]);
- data->temp_crit[i] = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_CRIT_LIMIT[i]);
-
- if (data->kind == tmp411) {
- data->temp_lowest[i] = i2c_smbus_read_byte_data(client,
- TMP411_TEMP_LOWEST_MSB[i]) << 8;
- data->temp_lowest[i] |= i2c_smbus_read_byte_data(
- client, TMP411_TEMP_LOWEST_LSB[i]);
-
- data->temp_highest[i] = i2c_smbus_read_byte_data(
- client, TMP411_TEMP_HIGHEST_MSB[i]) << 8;
- data->temp_highest[i] |= i2c_smbus_read_byte_data(
- client, TMP411_TEMP_HIGHEST_LSB[i]);
- }
- }
- return data;
-}
-
-static struct tmp401_data *tmp401_update_device(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct tmp401_data *data = i2c_get_clientdata(client);
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- data->status = i2c_smbus_read_byte_data(client, TMP401_STATUS);
- data->config = i2c_smbus_read_byte_data(client,
- TMP401_CONFIG_READ);
- tmp401_update_device_reg16(client, data);
-
- data->temp_crit_hyst = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_CRIT_HYST);
-
- data->last_updated = jiffies;
- data->valid = 1;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
-}
+static struct i2c_driver tmp401_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "tmp401",
+ },
+ .probe = tmp401_probe,
+ .remove = tmp401_remove,
+ .id_table = tmp401_id,
+ .detect = tmp401_detect,
+ .address_list = normal_i2c,
+};
static int __init tmp401_init(void)
{
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 738c472ece27..6b4165c12092 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -295,7 +295,6 @@ exit_remove:
sysfs_remove_group(&client->dev.kobj, &tmp421_group);
exit_free:
- i2c_set_clientdata(client, NULL);
kfree(data);
return err;
@@ -308,7 +307,6 @@ static int tmp421_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &tmp421_group);
- i2c_set_clientdata(client, NULL);
kfree(data);
return 0;
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 68e90abeba96..5da5942cf970 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -300,8 +300,11 @@ static const struct of_device_id env_match[] = {
MODULE_DEVICE_TABLE(of, env_match);
static struct of_platform_driver env_driver = {
- .name = "ultra45_env",
- .match_table = env_match,
+ .driver = {
+ .name = "ultra45_env",
+ .owner = THIS_MODULE,
+ .of_match_table = env_match,
+ },
.probe = env_probe,
.remove = __devexit_p(env_remove),
};
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 32d4adee73db..c84b9b4e6960 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1197,7 +1197,6 @@ ERROR4:
if (data->lm75[1])
i2c_unregister_device(data->lm75[1]);
ERROR3:
- i2c_set_clientdata(client, NULL);
kfree(data);
ERROR1:
return err;
@@ -1219,7 +1218,6 @@ w83781d_remove(struct i2c_client *client)
if (data->lm75[1])
i2c_unregister_device(data->lm75[1]);
- i2c_set_clientdata(client, NULL);
kfree(data);
return 0;
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 612807d97155..697202e27891 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
+#include <linux/smp_lock.h>
#include <linux/hwmon-vid.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
@@ -1319,8 +1320,8 @@ static ssize_t watchdog_write(struct file *filp, const char __user *buf,
return count;
}
-static int watchdog_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static long watchdog_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
{
static struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING |
@@ -1332,6 +1333,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
int val, ret = 0;
struct w83793_data *data = filp->private_data;
+ lock_kernel();
switch (cmd) {
case WDIOC_GETSUPPORT:
if (!nowayout)
@@ -1385,7 +1387,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
default:
ret = -ENOTTY;
}
-
+ unlock_kernel();
return ret;
}
@@ -1395,7 +1397,7 @@ static const struct file_operations watchdog_fops = {
.open = watchdog_open,
.release = watchdog_close,
.write = watchdog_write,
- .ioctl = watchdog_ioctl,
+ .unlocked_ioctl = watchdog_ioctl,
};
/*
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 87ab0568bb0e..bceafbfa7268 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -475,6 +475,26 @@ config I2C_PASEMI
help
Supports the PA Semi PWRficient on-chip SMBus interfaces.
+config I2C_PCA_PLATFORM
+ tristate "PCA9564/PCA9665 as platform device"
+ select I2C_ALGOPCA
+ default n
+ help
+ This driver supports a memory mapped Philips PCA9564/PCA9665
+ parallel bus to I2C bus controller.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-pca-platform.
+
+config I2C_PMCMSP
+ tristate "PMC MSP I2C TWI Controller"
+ depends on PMC_MSP
+ help
+ This driver supports the PMC TWI controller on MSP devices.
+
+ This driver can also be built as module. If so, the module
+ will be called i2c-pmcmsp.
+
config I2C_PNX
tristate "I2C bus support for Philips PNX targets"
depends on ARCH_PNX4008
@@ -711,26 +731,6 @@ config I2C_PCA_ISA
delays when I2C/SMBus chip drivers are loaded (e.g. at boot
time). If unsure, say N.
-config I2C_PCA_PLATFORM
- tristate "PCA9564/PCA9665 as platform device"
- select I2C_ALGOPCA
- default n
- help
- This driver supports a memory mapped Philips PCA9564/PCA9665
- parallel bus to I2C bus controller.
-
- This driver can also be built as a module. If so, the module
- will be called i2c-pca-platform.
-
-config I2C_PMCMSP
- tristate "PMC MSP I2C TWI Controller"
- depends on PMC_MSP
- help
- This driver supports the PMC TWI controller on MSP devices.
-
- This driver can also be built as module. If so, the module
- will be called i2c-pmcmsp.
-
config I2C_SIBYTE
tristate "SiByte SMBus interface"
depends on SIBYTE_SB1xxx_SOC
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 097236f631e8..936880bd1dc5 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -27,7 +27,7 @@ obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o
obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o
obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o
-# Embebbed system I2C/SMBus host controller drivers
+# Embedded system I2C/SMBus host controller drivers
obj-$(CONFIG_I2C_AT91) += i2c-at91.o
obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
@@ -46,6 +46,8 @@ obj-$(CONFIG_I2C_NOMADIK) += i2c-nomadik.o
obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
obj-$(CONFIG_I2C_OMAP) += i2c-omap.o
obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
+obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
+obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o
obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
@@ -68,8 +70,6 @@ obj-$(CONFIG_I2C_TINY_USB) += i2c-tiny-usb.o
obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o
obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
-obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
-obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o
obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
obj-$(CONFIG_I2C_STUB) += i2c-stub.o
obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 16948db38973..b02b4533651d 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -440,7 +440,7 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
init_waitqueue_head(&cpm->i2c_wait);
- cpm->irq = of_irq_to_resource(ofdev->node, 0, NULL);
+ cpm->irq = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
if (!cpm->irq)
return -EINVAL;
@@ -451,13 +451,13 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
return ret;
/* I2C parameter RAM */
- i2c_base = of_iomap(ofdev->node, 1);
+ i2c_base = of_iomap(ofdev->dev.of_node, 1);
if (i2c_base == NULL) {
ret = -EINVAL;
goto out_irq;
}
- if (of_device_is_compatible(ofdev->node, "fsl,cpm1-i2c")) {
+ if (of_device_is_compatible(ofdev->dev.of_node, "fsl,cpm1-i2c")) {
/* Check for and use a microcode relocation patch. */
cpm->i2c_ram = i2c_base;
@@ -474,7 +474,7 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
cpm->version = 1;
- } else if (of_device_is_compatible(ofdev->node, "fsl,cpm2-i2c")) {
+ } else if (of_device_is_compatible(ofdev->dev.of_node, "fsl,cpm2-i2c")) {
cpm->i2c_addr = cpm_muram_alloc(sizeof(struct i2c_ram), 64);
cpm->i2c_ram = cpm_muram_addr(cpm->i2c_addr);
out_be16(i2c_base, cpm->i2c_addr);
@@ -489,24 +489,24 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
}
/* I2C control/status registers */
- cpm->i2c_reg = of_iomap(ofdev->node, 0);
+ cpm->i2c_reg = of_iomap(ofdev->dev.of_node, 0);
if (cpm->i2c_reg == NULL) {
ret = -EINVAL;
goto out_ram;
}
- data = of_get_property(ofdev->node, "fsl,cpm-command", &len);
+ data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
if (!data || len != 4) {
ret = -EINVAL;
goto out_reg;
}
cpm->cp_command = *data;
- data = of_get_property(ofdev->node, "linux,i2c-class", &len);
+ data = of_get_property(ofdev->dev.of_node, "linux,i2c-class", &len);
if (data && len == 4)
cpm->adap.class = *data;
- data = of_get_property(ofdev->node, "clock-frequency", &len);
+ data = of_get_property(ofdev->dev.of_node, "clock-frequency", &len);
if (data && len == 4)
cpm->freq = *data;
else
@@ -661,7 +661,7 @@ static int __devinit cpm_i2c_probe(struct of_device *ofdev,
/* register new adapter to i2c module... */
- data = of_get_property(ofdev->node, "linux,i2c-index", &len);
+ data = of_get_property(ofdev->dev.of_node, "linux,i2c-index", &len);
if (data && len == 4) {
cpm->adap.nr = *data;
result = i2c_add_numbered_adapter(&cpm->adap);
@@ -679,7 +679,7 @@ static int __devinit cpm_i2c_probe(struct of_device *ofdev,
/*
* register OF I2C devices
*/
- of_register_i2c_devices(&cpm->adap, ofdev->node);
+ of_register_i2c_devices(&cpm->adap, ofdev->dev.of_node);
return 0;
out_shut:
@@ -718,13 +718,13 @@ static const struct of_device_id cpm_i2c_match[] = {
MODULE_DEVICE_TABLE(of, cpm_i2c_match);
static struct of_platform_driver cpm_i2c_driver = {
- .match_table = cpm_i2c_match,
.probe = cpm_i2c_probe,
.remove = __devexit_p(cpm_i2c_remove),
- .driver = {
- .name = "fsl-i2c-cpm",
- .owner = THIS_MODULE,
- }
+ .driver = {
+ .name = "fsl-i2c-cpm",
+ .owner = THIS_MODULE,
+ .of_match_table = cpm_i2c_match,
+ },
};
static int __init cpm_i2c_init(void)
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index f8ccc0fe95a8..bf344135647a 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -664,7 +664,7 @@ static inline u8 iic_clckdiv(unsigned int opb)
static int __devinit iic_request_irq(struct of_device *ofdev,
struct ibm_iic_private *dev)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
int irq;
if (iic_force_poll)
@@ -695,7 +695,7 @@ static int __devinit iic_request_irq(struct of_device *ofdev,
static int __devinit iic_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct ibm_iic_private *dev;
struct i2c_adapter *adap;
const u32 *freq;
@@ -807,8 +807,11 @@ static const struct of_device_id ibm_iic_match[] = {
};
static struct of_platform_driver ibm_iic_driver = {
- .name = "ibm-iic",
- .match_table = ibm_iic_match,
+ .driver = {
+ .name = "ibm-iic",
+ .owner = THIS_MODULE,
+ .of_match_table = ibm_iic_match,
+ },
.probe = iic_probe,
.remove = __devexit_p(iic_remove),
};
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index e86cef300c7d..df00eb1f11f9 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -560,14 +560,14 @@ static int __devinit fsl_i2c_probe(struct of_device *op,
init_waitqueue_head(&i2c->queue);
- i2c->base = of_iomap(op->node, 0);
+ i2c->base = of_iomap(op->dev.of_node, 0);
if (!i2c->base) {
dev_err(i2c->dev, "failed to map controller\n");
result = -ENOMEM;
goto fail_map;
}
- i2c->irq = irq_of_parse_and_map(op->node, 0);
+ i2c->irq = irq_of_parse_and_map(op->dev.of_node, 0);
if (i2c->irq) { /* no i2c->irq implies polling */
result = request_irq(i2c->irq, mpc_i2c_isr,
IRQF_SHARED, "i2c-mpc", i2c);
@@ -577,21 +577,22 @@ static int __devinit fsl_i2c_probe(struct of_device *op,
}
}
- if (of_get_property(op->node, "fsl,preserve-clocking", NULL)) {
+ if (of_get_property(op->dev.of_node, "fsl,preserve-clocking", NULL)) {
clock = MPC_I2C_CLOCK_PRESERVE;
} else {
- prop = of_get_property(op->node, "clock-frequency", &plen);
+ prop = of_get_property(op->dev.of_node, "clock-frequency",
+ &plen);
if (prop && plen == sizeof(u32))
clock = *prop;
}
if (match->data) {
struct mpc_i2c_data *data = match->data;
- data->setup(op->node, i2c, clock, data->prescaler);
+ data->setup(op->dev.of_node, i2c, clock, data->prescaler);
} else {
/* Backwards compatibility */
- if (of_get_property(op->node, "dfsrr", NULL))
- mpc_i2c_setup_8xxx(op->node, i2c, clock, 0);
+ if (of_get_property(op->dev.of_node, "dfsrr", NULL))
+ mpc_i2c_setup_8xxx(op->dev.of_node, i2c, clock, 0);
}
dev_set_drvdata(&op->dev, i2c);
@@ -605,7 +606,7 @@ static int __devinit fsl_i2c_probe(struct of_device *op,
dev_err(i2c->dev, "failed to add adapter\n");
goto fail_add;
}
- of_register_i2c_devices(&i2c->adap, op->node);
+ of_register_i2c_devices(&i2c->adap, op->dev.of_node);
return result;
@@ -674,12 +675,12 @@ MODULE_DEVICE_TABLE(of, mpc_i2c_of_match);
/* Structure for a device driver */
static struct of_platform_driver mpc_i2c_driver = {
- .match_table = mpc_i2c_of_match,
.probe = fsl_i2c_probe,
.remove = __devexit_p(fsl_i2c_remove),
- .driver = {
- .owner = THIS_MODULE,
- .name = DRV_NAME,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .of_match_table = mpc_i2c_of_match,
},
};
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index db3c9f3a7647..1cca2631e5b3 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -47,7 +47,6 @@ static DEFINE_MUTEX(core_lock);
static DEFINE_IDR(i2c_adapter_idr);
static struct device_type i2c_client_type;
-static int i2c_check_addr(struct i2c_adapter *adapter, int addr);
static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver);
/* ------------------------------------------------------------------------- */
@@ -371,6 +370,59 @@ struct i2c_client *i2c_verify_client(struct device *dev)
EXPORT_SYMBOL(i2c_verify_client);
+/* This is a permissive address validity check, I2C address map constraints
+ * are purposely not enforced, except for the general call address. */
+static int i2c_check_client_addr_validity(const struct i2c_client *client)
+{
+ if (client->flags & I2C_CLIENT_TEN) {
+ /* 10-bit address, all values are valid */
+ if (client->addr > 0x3ff)
+ return -EINVAL;
+ } else {
+ /* 7-bit address, reject the general call address */
+ if (client->addr == 0x00 || client->addr > 0x7f)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* And this is a strict address validity check, used when probing. If a
+ * device uses a reserved address, then it shouldn't be probed. 7-bit
+ * addressing is assumed, 10-bit address devices are rare and should be
+ * explicitly enumerated. */
+static int i2c_check_addr_validity(unsigned short addr)
+{
+ /*
+ * Reserved addresses per I2C specification:
+ * 0x00 General call address / START byte
+ * 0x01 CBUS address
+ * 0x02 Reserved for different bus format
+ * 0x03 Reserved for future purposes
+ * 0x04-0x07 Hs-mode master code
+ * 0x78-0x7b 10-bit slave addressing
+ * 0x7c-0x7f Reserved for future purposes
+ */
+ if (addr < 0x08 || addr > 0x77)
+ return -EINVAL;
+ return 0;
+}
+
+static int __i2c_check_addr_busy(struct device *dev, void *addrp)
+{
+ struct i2c_client *client = i2c_verify_client(dev);
+ int addr = *(int *)addrp;
+
+ if (client && client->addr == addr)
+ return -EBUSY;
+ return 0;
+}
+
+static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
+{
+ return device_for_each_child(&adapter->dev, &addr,
+ __i2c_check_addr_busy);
+}
+
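The helpers just added split address checking in two: a permissive check used when instantiating a client (only the general call address and out-of-range values are refused) and a strict check used for probing, which rejects every address reserved by the I2C specification and so collapses to "7-bit address in 0x08..0x77". A small user-space illustration of both rules (the addr_ok_* names are illustrative, not the kernel helpers):

/* 7-bit I2C address validity rules, as used by the new helpers above. */
#include <stdbool.h>
#include <stdio.h>

/* Strict: reject all addresses the I2C spec reserves (0x00-0x07, 0x78-0x7f). */
static bool addr_ok_for_probe(unsigned int addr)
{
	return addr >= 0x08 && addr <= 0x77;
}

/* Permissive: only the general call address and out-of-range values fail. */
static bool addr_ok_for_client(unsigned int addr, bool ten_bit)
{
	if (ten_bit)
		return addr <= 0x3ff;
	return addr != 0x00 && addr <= 0x7f;
}

int main(void)
{
	printf("0x03 probe=%d client=%d\n",
	       addr_ok_for_probe(0x03), addr_ok_for_client(0x03, false));
	printf("0x50 probe=%d client=%d\n",
	       addr_ok_for_probe(0x50), addr_ok_for_client(0x50, false));
	return 0;
}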
/**
* i2c_new_device - instantiate an i2c device
* @adap: the adapter managing the device
@@ -410,14 +462,25 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
strlcpy(client->name, info->type, sizeof(client->name));
+ /* Check for address validity */
+ status = i2c_check_client_addr_validity(client);
+ if (status) {
+ dev_err(&adap->dev, "Invalid %d-bit I2C address 0x%02hx\n",
+ client->flags & I2C_CLIENT_TEN ? 10 : 7, client->addr);
+ goto out_err_silent;
+ }
+
/* Check for address business */
- status = i2c_check_addr(adap, client->addr);
+ status = i2c_check_addr_busy(adap, client->addr);
if (status)
goto out_err;
client->dev.parent = &client->adapter->dev;
client->dev.bus = &i2c_bus_type;
client->dev.type = &i2c_client_type;
+#ifdef CONFIG_OF
+ client->dev.of_node = info->of_node;
+#endif
dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
client->addr);
@@ -433,6 +496,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
out_err:
dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x "
"(%d)\n", client->name, client->addr, status);
+out_err_silent:
kfree(client);
return NULL;
}
@@ -558,15 +622,9 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- if (info.addr < 0x03 || info.addr > 0x77) {
- dev_err(dev, "%s: Invalid I2C address 0x%hx\n", "new_device",
- info.addr);
- return -EINVAL;
- }
-
client = i2c_new_device(adap, &info);
if (!client)
- return -EEXIST;
+ return -EINVAL;
/* Keep track of the added device */
i2c_lock_adapter(adap);
@@ -1021,21 +1079,6 @@ EXPORT_SYMBOL(i2c_del_driver);
/* ------------------------------------------------------------------------- */
-static int __i2c_check_addr(struct device *dev, void *addrp)
-{
- struct i2c_client *client = i2c_verify_client(dev);
- int addr = *(int *)addrp;
-
- if (client && client->addr == addr)
- return -EBUSY;
- return 0;
-}
-
-static int i2c_check_addr(struct i2c_adapter *adapter, int addr)
-{
- return device_for_each_child(&adapter->dev, &addr, __i2c_check_addr);
-}
-
/**
* i2c_use_client - increments the reference count of the i2c client structure
* @client: the client being referenced
@@ -1274,6 +1317,41 @@ EXPORT_SYMBOL(i2c_master_recv);
* ----------------------------------------------------
*/
+/*
+ * Legacy default probe function, mostly relevant for SMBus. The default
+ * probe method is a quick write, but it is known to corrupt the 24RF08
+ * EEPROMs due to a state machine bug, and could also irreversibly
+ * write-protect some EEPROMs, so for address ranges 0x30-0x37 and 0x50-0x5f,
+ * we use a short byte read instead. Also, some bus drivers don't implement
+ * quick write, so we fall back to a byte read in that case too.
+ * On x86, there is another special case for FSC hardware monitoring chips,
+ * which want regular byte reads (address 0x73). Fortunately, these are the
+ * only known chips using this I2C address on PC hardware.
+ * Returns 1 if probe succeeded, 0 if not.
+ */
+static int i2c_default_probe(struct i2c_adapter *adap, unsigned short addr)
+{
+ int err;
+ union i2c_smbus_data dummy;
+
+#ifdef CONFIG_X86
+ if (addr == 0x73 && (adap->class & I2C_CLASS_HWMON)
+ && i2c_check_functionality(adap, I2C_FUNC_SMBUS_READ_BYTE_DATA))
+ err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0,
+ I2C_SMBUS_BYTE_DATA, &dummy);
+ else
+#endif
+ if ((addr & ~0x07) == 0x30 || (addr & ~0x0f) == 0x50
+ || !i2c_check_functionality(adap, I2C_FUNC_SMBUS_QUICK))
+ err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0,
+ I2C_SMBUS_BYTE, &dummy);
+ else
+ err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_WRITE, 0,
+ I2C_SMBUS_QUICK, NULL);
+
+ return err >= 0;
+}
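As a side note, the selection logic in i2c_default_probe() reads as a three-way decision tree. A minimal user-space sketch of that tree follows; the demo_ prefix and the two boolean parameters (standing in for the x86/HWMON special case and for I2C_FUNC_SMBUS_QUICK support) are simplifications made up for this illustration, not part of the patch.

#include <stdio.h>

enum probe_method { QUICK_WRITE, BYTE_READ, BYTE_DATA_READ };

static enum probe_method demo_pick_method(unsigned short addr,
					  int is_x86_hwmon_adapter,
					  int has_smbus_quick)
{
	if (addr == 0x73 && is_x86_hwmon_adapter)
		return BYTE_DATA_READ;		/* FSC hwmon special case */
	if ((addr & ~0x07) == 0x30 || (addr & ~0x0f) == 0x50 ||
	    !has_smbus_quick)
		return BYTE_READ;		/* EEPROM-safe path */
	return QUICK_WRITE;			/* default probe */
}

int main(void)
{
	static const char *names[] = { "quick write", "byte read",
				       "byte-data read" };

	printf("0x48 -> %s\n", names[demo_pick_method(0x48, 0, 1)]);
	printf("0x50 -> %s\n", names[demo_pick_method(0x50, 0, 1)]);
	printf("0x73 -> %s\n", names[demo_pick_method(0x73, 1, 1)]);
	return 0;
}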
+
static int i2c_detect_address(struct i2c_client *temp_client,
struct i2c_driver *driver)
{
@@ -1283,34 +1361,20 @@ static int i2c_detect_address(struct i2c_client *temp_client,
int err;
/* Make sure the address is valid */
- if (addr < 0x03 || addr > 0x77) {
+ err = i2c_check_addr_validity(addr);
+ if (err) {
dev_warn(&adapter->dev, "Invalid probe address 0x%02x\n",
addr);
- return -EINVAL;
+ return err;
}
/* Skip if already in use */
- if (i2c_check_addr(adapter, addr))
+ if (i2c_check_addr_busy(adapter, addr))
return 0;
/* Make sure there is something at this address */
- if (addr == 0x73 && (adapter->class & I2C_CLASS_HWMON)) {
- /* Special probe for FSC hwmon chips */
- union i2c_smbus_data dummy;
-
- if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_READ, 0,
- I2C_SMBUS_BYTE_DATA, &dummy) < 0)
- return 0;
- } else {
- if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_WRITE, 0,
- I2C_SMBUS_QUICK, NULL) < 0)
- return 0;
-
- /* Prevent 24RF08 corruption */
- if ((addr & ~0x0f) == 0x50)
- i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_WRITE, 0,
- I2C_SMBUS_QUICK, NULL);
- }
+ if (!i2c_default_probe(adapter, addr))
+ return 0;
/* Finally call the custom detection function */
memset(&info, 0, sizeof(struct i2c_board_info));
@@ -1404,42 +1468,22 @@ i2c_new_probed_device(struct i2c_adapter *adap,
for (i = 0; addr_list[i] != I2C_CLIENT_END; i++) {
/* Check address validity */
- if (addr_list[i] < 0x03 || addr_list[i] > 0x77) {
+ if (i2c_check_addr_validity(addr_list[i]) < 0) {
dev_warn(&adap->dev, "Invalid 7-bit address "
"0x%02x\n", addr_list[i]);
continue;
}
/* Check address availability */
- if (i2c_check_addr(adap, addr_list[i])) {
+ if (i2c_check_addr_busy(adap, addr_list[i])) {
dev_dbg(&adap->dev, "Address 0x%02x already in "
"use, not probing\n", addr_list[i]);
continue;
}
- /* Test address responsiveness
- The default probe method is a quick write, but it is known
- to corrupt the 24RF08 EEPROMs due to a state machine bug,
- and could also irreversibly write-protect some EEPROMs, so
- for address ranges 0x30-0x37 and 0x50-0x5f, we use a byte
- read instead. Also, some bus drivers don't implement
- quick write, so we fallback to a byte read it that case
- too. */
- if ((addr_list[i] & ~0x07) == 0x30
- || (addr_list[i] & ~0x0f) == 0x50
- || !i2c_check_functionality(adap, I2C_FUNC_SMBUS_QUICK)) {
- union i2c_smbus_data data;
-
- if (i2c_smbus_xfer(adap, addr_list[i], 0,
- I2C_SMBUS_READ, 0,
- I2C_SMBUS_BYTE, &data) >= 0)
- break;
- } else {
- if (i2c_smbus_xfer(adap, addr_list[i], 0,
- I2C_SMBUS_WRITE, 0,
- I2C_SMBUS_QUICK, NULL) >= 0)
- break;
- }
+ /* Test address responsiveness */
+ if (i2c_default_probe(adap, addr_list[i]))
+ break;
}
if (addr_list[i] == I2C_CLIENT_END) {
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index a24e0bfe9201..f61ccc1e5ea3 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -173,7 +173,6 @@ static int smbalert_remove(struct i2c_client *ara)
cancel_work_sync(&alert->alert);
- i2c_set_clientdata(ara, NULL);
kfree(alert);
return 0;
}
diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
index d2b8b272bc27..cb10201a15ed 100644
--- a/drivers/ide/cmd640.c
+++ b/drivers/ide/cmd640.c
@@ -633,12 +633,10 @@ static void __init cmd640_init_dev(ide_drive_t *drive)
static int cmd640_test_irq(ide_hwif_t *hwif)
{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
int irq_reg = hwif->channel ? ARTTIM23 : CFR;
- u8 irq_stat, irq_mask = hwif->channel ? ARTTIM23_IDE23INTR :
+ u8 irq_mask = hwif->channel ? ARTTIM23_IDE23INTR :
CFR_IDE01INTR;
-
- pci_read_config_byte(dev, irq_reg, &irq_stat);
+ u8 irq_stat = get_cmd640_reg(irq_reg);
return (irq_stat & irq_mask) ? 1 : 0;
}
diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c
index b9e517de6a82..3feaa26410be 100644
--- a/drivers/ide/gayle.c
+++ b/drivers/ide/gayle.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/zorro.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
@@ -24,15 +25,6 @@
/*
- * Bases of the IDE interfaces
- */
-
-#define GAYLE_BASE_4000 0xdd2020 /* A4000/A4000T */
-#define GAYLE_BASE_1200 0xda0000 /* A1200/A600 and E-Matrix 530 */
-
-#define GAYLE_IDEREG_SIZE 0x2000
-
- /*
* Offsets from one of the above bases
*/
@@ -68,20 +60,20 @@ MODULE_PARM_DESC(doubler, "enable support for IDE doublers");
static int gayle_test_irq(ide_hwif_t *hwif)
{
- unsigned char ch;
+ unsigned char ch;
- ch = z_readb(hwif->io_ports.irq_addr);
- if (!(ch & GAYLE_IRQ_IDE))
- return 0;
- return 1;
+ ch = z_readb(hwif->io_ports.irq_addr);
+ if (!(ch & GAYLE_IRQ_IDE))
+ return 0;
+ return 1;
}
static void gayle_a1200_clear_irq(ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
+ ide_hwif_t *hwif = drive->hwif;
- (void)z_readb(hwif->io_ports.status_addr);
- z_writeb(0x7c, hwif->io_ports.irq_addr);
+ (void)z_readb(hwif->io_ports.status_addr);
+ z_writeb(0x7c, hwif->io_ports.irq_addr);
}
static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base,
@@ -122,64 +114,89 @@ static const struct ide_port_info gayle_port_info = {
* Probe for a Gayle IDE interface (and optionally for an IDE doubler)
*/
-static int __init gayle_init(void)
+static int __init amiga_gayle_ide_probe(struct platform_device *pdev)
{
- unsigned long phys_base, res_start, res_n;
- unsigned long base, ctrlport, irqport;
- int a4000, i, rc;
- struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS];
- struct ide_port_info d = gayle_port_info;
-
- if (!MACH_IS_AMIGA)
- return -ENODEV;
-
- if ((a4000 = AMIGAHW_PRESENT(A4000_IDE)) || AMIGAHW_PRESENT(A1200_IDE))
- goto found;
-
-#ifdef CONFIG_ZORRO
- if (zorro_find_device(ZORRO_PROD_MTEC_VIPER_MK_V_E_MATRIX_530_SCSI_IDE,
- NULL))
- goto found;
-#endif
- return -ENODEV;
-
-found:
- printk(KERN_INFO "ide: Gayle IDE controller (A%d style%s)\n",
- a4000 ? 4000 : 1200,
- ide_doubler ? ", IDE doubler" : "");
-
- if (a4000) {
- phys_base = GAYLE_BASE_4000;
- irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000);
- d.port_ops = &gayle_a4000_port_ops;
- } else {
- phys_base = GAYLE_BASE_1200;
- irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_1200);
- d.port_ops = &gayle_a1200_port_ops;
+ struct resource *res;
+ struct gayle_ide_platform_data *pdata;
+ unsigned long base, ctrlport, irqport;
+ unsigned int i;
+ int error;
+ struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS];
+ struct ide_port_info d = gayle_port_info;
+ struct ide_host *host;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (!request_mem_region(res->start, resource_size(res), "IDE"))
+ return -EBUSY;
+
+ pdata = pdev->dev.platform_data;
+ pr_info("ide: Gayle IDE controller (A%u style%s)\n",
+ pdata->explicit_ack ? 1200 : 4000,
+ ide_doubler ? ", IDE doubler" : "");
+
+ base = (unsigned long)ZTWO_VADDR(pdata->base);
+ ctrlport = 0;
+ irqport = (unsigned long)ZTWO_VADDR(pdata->irqport);
+ if (pdata->explicit_ack)
+ d.port_ops = &gayle_a1200_port_ops;
+ else
+ d.port_ops = &gayle_a4000_port_ops;
+
+ for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++, base += GAYLE_NEXT_PORT) {
+ if (GAYLE_HAS_CONTROL_REG)
+ ctrlport = base + GAYLE_CONTROL;
+
+ gayle_setup_ports(&hw[i], base, ctrlport, irqport);
+ hws[i] = &hw[i];
}
- res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1);
- res_n = GAYLE_IDEREG_SIZE;
+ error = ide_host_add(&d, hws, i, &host);
+ if (error)
+ goto out;
- if (!request_mem_region(res_start, res_n, "IDE"))
- return -EBUSY;
+ platform_set_drvdata(pdev, host);
+ return 0;
- for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
- base = (unsigned long)ZTWO_VADDR(phys_base + i * GAYLE_NEXT_PORT);
- ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0;
+out:
+ release_mem_region(res->start, resource_size(res));
+ return error;
+}
+
+static int __exit amiga_gayle_ide_remove(struct platform_device *pdev)
+{
+ struct ide_host *host = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ ide_host_remove(host);
+ release_mem_region(res->start, resource_size(res));
+ return 0;
+}
- gayle_setup_ports(&hw[i], base, ctrlport, irqport);
+static struct platform_driver amiga_gayle_ide_driver = {
+ .remove = __exit_p(amiga_gayle_ide_remove),
+ .driver = {
+ .name = "amiga-gayle-ide",
+ .owner = THIS_MODULE,
+ },
+};
- hws[i] = &hw[i];
- }
+static int __init amiga_gayle_ide_init(void)
+{
+ return platform_driver_probe(&amiga_gayle_ide_driver,
+ amiga_gayle_ide_probe);
+}
- rc = ide_host_add(&d, hws, i, NULL);
- if (rc)
- release_mem_region(res_start, res_n);
+module_init(amiga_gayle_ide_init);
- return rc;
+static void __exit amiga_gayle_ide_exit(void)
+{
+ platform_driver_unregister(&amiga_gayle_ide_driver);
}
-module_init(gayle_init);
+module_exit(amiga_gayle_ide_exit);
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:amiga-gayle-ide");
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 3b128dce9c3a..33d65039cce9 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -407,32 +407,24 @@ static int ide_disk_get_capacity(ide_drive_t *drive)
return 0;
}
-static u64 ide_disk_set_capacity(ide_drive_t *drive, u64 capacity)
+static void ide_disk_unlock_native_capacity(ide_drive_t *drive)
{
- u64 set = min(capacity, drive->probed_capacity);
u16 *id = drive->id;
int lba48 = ata_id_lba48_enabled(id);
if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 ||
ata_id_hpa_enabled(id) == 0)
- goto out;
+ return;
/*
* according to the spec the SET MAX ADDRESS command shall be
* immediately preceded by a READ NATIVE MAX ADDRESS command
*/
- capacity = ide_disk_hpa_get_native_capacity(drive, lba48);
- if (capacity == 0)
- goto out;
-
- set = ide_disk_hpa_set_capacity(drive, set, lba48);
- if (set) {
- /* needed for ->resume to disable HPA */
- drive->dev_flags |= IDE_DFLAG_NOHPA;
- return set;
- }
-out:
- return drive->capacity64;
+ if (!ide_disk_hpa_get_native_capacity(drive, lba48))
+ return;
+
+ if (ide_disk_hpa_set_capacity(drive, drive->probed_capacity, lba48))
+ drive->dev_flags |= IDE_DFLAG_NOHPA; /* disable HPA on resume */
}
static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
@@ -783,13 +775,13 @@ static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
}
const struct ide_disk_ops ide_ata_disk_ops = {
- .check = ide_disk_check,
- .set_capacity = ide_disk_set_capacity,
- .get_capacity = ide_disk_get_capacity,
- .setup = ide_disk_setup,
- .flush = ide_disk_flush,
- .init_media = ide_disk_init_media,
- .set_doorlock = ide_disk_set_doorlock,
- .do_request = ide_do_rw_disk,
- .ioctl = ide_disk_ioctl,
+ .check = ide_disk_check,
+ .unlock_native_capacity = ide_disk_unlock_native_capacity,
+ .get_capacity = ide_disk_get_capacity,
+ .setup = ide_disk_setup,
+ .flush = ide_disk_flush,
+ .init_media = ide_disk_init_media,
+ .set_doorlock = ide_disk_set_doorlock,
+ .do_request = ide_do_rw_disk,
+ .ioctl = ide_disk_ioctl,
};
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 27d9fe08d80b..79399534782c 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -288,17 +288,14 @@ static int ide_gd_media_changed(struct gendisk *disk)
return ret;
}
-static unsigned long long ide_gd_set_capacity(struct gendisk *disk,
- unsigned long long capacity)
+static void ide_gd_unlock_native_capacity(struct gendisk *disk)
{
struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
ide_drive_t *drive = idkp->drive;
const struct ide_disk_ops *disk_ops = drive->disk_ops;
- if (disk_ops->set_capacity)
- return disk_ops->set_capacity(drive, capacity);
-
- return drive->capacity64;
+ if (disk_ops->unlock_native_capacity)
+ disk_ops->unlock_native_capacity(drive);
}
static int ide_gd_revalidate_disk(struct gendisk *disk)
@@ -329,7 +326,7 @@ static const struct block_device_operations ide_gd_ops = {
.locked_ioctl = ide_gd_ioctl,
.getgeo = ide_gd_getgeo,
.media_changed = ide_gd_media_changed,
- .set_capacity = ide_gd_set_capacity,
+ .unlock_native_capacity = ide_gd_unlock_native_capacity,
.revalidate_disk = ide_gd_revalidate_disk
};
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index 42965b3e30b9..542603b394e4 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -95,6 +95,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
hw.dev = &pdev->dev;
+ d.irq_flags = res_irq->flags;
if (mmio)
d.host_flags |= IDE_HFLAG_MMIO;
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index c5f3841af360..3a35ec6193d2 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -93,13 +93,13 @@ static int pdc202xx_test_irq(ide_hwif_t *hwif)
* bit 7: error, bit 6: interrupting,
* bit 5: FIFO full, bit 4: FIFO empty
*/
- return ((sc1d & 0x50) == 0x50) ? 1 : 0;
+ return (sc1d & 0x40) ? 1 : 0;
} else {
/*
* bit 3: error, bit 2: interrupting,
* bit 1: FIFO full, bit 0: FIFO empty
*/
- return ((sc1d & 0x05) == 0x05) ? 1 : 0;
+ return (sc1d & 0x04) ? 1 : 0;
}
}
@@ -241,6 +241,7 @@ static const struct ide_port_ops pdc20246_port_ops = {
static const struct ide_port_ops pdc2026x_port_ops = {
.set_pio_mode = pdc202xx_set_pio_mode,
.set_dma_mode = pdc202xx_set_mode,
+ .test_irq = pdc202xx_test_irq,
.cable_detect = pdc2026x_cable_detect,
};
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 159955d16c47..ebcf8e470a97 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -1153,7 +1153,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
if (macio_resource_count(mdev) == 0) {
printk(KERN_WARNING "ide-pmac: no address for %s\n",
- mdev->ofdev.node->full_name);
+ mdev->ofdev.dev.of_node->full_name);
rc = -ENXIO;
goto out_free_pmif;
}
@@ -1161,7 +1161,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
/* Request memory resource for IO ports */
if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
printk(KERN_ERR "ide-pmac: can't request MMIO resource for "
- "%s!\n", mdev->ofdev.node->full_name);
+ "%s!\n", mdev->ofdev.dev.of_node->full_name);
rc = -EBUSY;
goto out_free_pmif;
}
@@ -1173,7 +1173,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
*/
if (macio_irq_count(mdev) == 0) {
printk(KERN_WARNING "ide-pmac: no intrs for device %s, using "
- "13\n", mdev->ofdev.node->full_name);
+ "13\n", mdev->ofdev.dev.of_node->full_name);
irq = irq_create_mapping(NULL, 13);
} else
irq = macio_irq(mdev, 0);
@@ -1182,7 +1182,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
regbase = (unsigned long) base;
pmif->mdev = mdev;
- pmif->node = mdev->ofdev.node;
+ pmif->node = mdev->ofdev.dev.of_node;
pmif->regbase = regbase;
pmif->irq = irq;
pmif->kauai_fcr = NULL;
@@ -1191,7 +1191,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
printk(KERN_WARNING "ide-pmac: can't request DMA "
"resource for %s!\n",
- mdev->ofdev.node->full_name);
+ mdev->ofdev.dev.of_node->full_name);
else
pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
} else
@@ -1400,8 +1400,11 @@ static struct of_device_id pmac_ide_macio_match[] =
static struct macio_driver pmac_ide_macio_driver =
{
- .name = "ide-pmac",
- .match_table = pmac_ide_macio_match,
+ .driver = {
+ .name = "ide-pmac",
+ .owner = THIS_MODULE,
+ .of_match_table = pmac_ide_macio_match,
+ },
.probe = pmac_ide_macio_attach,
.suspend = pmac_ide_macio_suspend,
.resume = pmac_ide_macio_resume,
diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig
index f15e90a453d1..fb5c5186d4aa 100644
--- a/drivers/idle/Kconfig
+++ b/drivers/idle/Kconfig
@@ -1,3 +1,14 @@
+config INTEL_IDLE
+ tristate "Cpuidle Driver for Intel Processors"
+ depends on CPU_IDLE
+ depends on X86
+ depends on CPU_SUP_INTEL
+ depends on EXPERIMENTAL
+ help
+ Enable intel_idle, a cpuidle driver that includes knowledge of
+ native Intel hardware idle features. The acpi_idle driver
+ can be configured at the same time, in order to handle
+ processors intel_idle does not support.
menu "Memory power savings"
depends on X86_64
diff --git a/drivers/idle/Makefile b/drivers/idle/Makefile
index 5f68fc377e21..23d295cf10f2 100644
--- a/drivers/idle/Makefile
+++ b/drivers/idle/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_I7300_IDLE) += i7300_idle.o
+obj-$(CONFIG_INTEL_IDLE) += intel_idle.o
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
new file mode 100755
index 000000000000..54f0fb4cd5d2
--- /dev/null
+++ b/drivers/idle/intel_idle.c
@@ -0,0 +1,461 @@
+/*
+ * intel_idle.c - native hardware idle loop for modern Intel processors
+ *
+ * Copyright (c) 2010, Intel Corporation.
+ * Len Brown <len.brown@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * intel_idle is a cpuidle driver that loads on specific Intel processors
+ * in lieu of the legacy ACPI processor_idle driver. The intent is to
+ * make Linux more efficient on these processors, as intel_idle knows
+ * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs.
+ */
+
+/*
+ * Design Assumptions
+ *
+ * All CPUs have same idle states as boot CPU
+ *
+ * Chipset BM_STS (bus master status) bit is a NOP
+ * for preventing entry into deep C-states
+ */
+
+/*
+ * Known limitations
+ *
+ * The driver currently initializes for_each_online_cpu() upon modprobe.
+ * It is unaware of subsequent processors hot-added to the system.
+ * This means that if you boot with maxcpus=n and later online
+ * processors above n, those processors will use C1 only.
+ *
+ * ACPI has a .suspend hack to turn off deep c-states during suspend
+ * to avoid complications with the lapic timer workaround.
+ * We have not seen issues with suspend, but may need the same workaround here.
+ *
+ * There is currently no kernel-based automatic probing/loading mechanism
+ * if the driver is built as a module.
+ */
+
+/* un-comment DEBUG to enable pr_debug() statements */
+#define DEBUG
+
+#include <linux/kernel.h>
+#include <linux/cpuidle.h>
+#include <linux/clockchips.h>
+#include <linux/hrtimer.h> /* ktime_get_real() */
+#include <trace/events/power.h>
+#include <linux/sched.h>
+
+#define INTEL_IDLE_VERSION "0.4"
+#define PREFIX "intel_idle: "
+
+#define MWAIT_SUBSTATE_MASK (0xf)
+#define MWAIT_CSTATE_MASK (0xf)
+#define MWAIT_SUBSTATE_SIZE (4)
+#define MWAIT_MAX_NUM_CSTATES 8
+#define CPUID_MWAIT_LEAF (5)
+#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
+#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
+
+static struct cpuidle_driver intel_idle_driver = {
+ .name = "intel_idle",
+ .owner = THIS_MODULE,
+};
+/* intel_idle.max_cstate=0 disables driver */
+static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;
+static int power_policy = 7; /* 0 = max perf; 15 = max powersave */
+
+static unsigned int substates;
+static int (*choose_substate)(int);
+
+/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
+static unsigned int lapic_timer_reliable_states;
+
+static struct cpuidle_device *intel_idle_cpuidle_devices;
+static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
+
+static struct cpuidle_state *cpuidle_state_table;
+
+/*
+ * States are indexed by the cstate number,
+ * which is also the index into the MWAIT hint array.
+ * Thus C0 is a dummy.
+ */
+static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
+ { /* MWAIT C0 */ },
+ { /* MWAIT C1 */
+ .name = "NHM-C1",
+ .desc = "MWAIT 0x00",
+ .driver_data = (void *) 0x00,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 3,
+ .power_usage = 1000,
+ .target_residency = 6,
+ .enter = &intel_idle },
+ { /* MWAIT C2 */
+ .name = "NHM-C3",
+ .desc = "MWAIT 0x10",
+ .driver_data = (void *) 0x10,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 20,
+ .power_usage = 500,
+ .target_residency = 80,
+ .enter = &intel_idle },
+ { /* MWAIT C3 */
+ .name = "NHM-C6",
+ .desc = "MWAIT 0x20",
+ .driver_data = (void *) 0x20,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 200,
+ .power_usage = 350,
+ .target_residency = 800,
+ .enter = &intel_idle },
+};
+
+static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
+ { /* MWAIT C0 */ },
+ { /* MWAIT C1 */
+ .name = "ATM-C1",
+ .desc = "MWAIT 0x00",
+ .driver_data = (void *) 0x00,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 1,
+ .power_usage = 1000,
+ .target_residency = 4,
+ .enter = &intel_idle },
+ { /* MWAIT C2 */
+ .name = "ATM-C2",
+ .desc = "MWAIT 0x10",
+ .driver_data = (void *) 0x10,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 20,
+ .power_usage = 500,
+ .target_residency = 80,
+ .enter = &intel_idle },
+ { /* MWAIT C3 */ },
+ { /* MWAIT C4 */
+ .name = "ATM-C4",
+ .desc = "MWAIT 0x30",
+ .driver_data = (void *) 0x30,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 100,
+ .power_usage = 250,
+ .target_residency = 400,
+ .enter = &intel_idle },
+ { /* MWAIT C5 */ },
+ { /* MWAIT C6 */
+ .name = "ATM-C6",
+ .desc = "MWAIT 0x40",
+ .driver_data = (void *) 0x40,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 200,
+ .power_usage = 150,
+ .target_residency = 800,
+ .enter = NULL }, /* disabled */
+};
+
+/*
+ * choose_tunable_substate()
+ *
+ * Run-time decision on which C-state substate to invoke
+ * If power_policy = 0, choose shallowest substate (0)
+ * If power_policy = 15, choose deepest substate
+ * If power_policy = middle, choose middle substate etc.
+ */
+static int choose_tunable_substate(int cstate)
+{
+ unsigned int num_substates;
+ unsigned int substate_choice;
+
+ power_policy &= 0xF; /* valid range: 0-15 */
+ cstate &= 7; /* valid range: 0-7 */
+
+ num_substates = (substates >> ((cstate) * 4)) & MWAIT_SUBSTATE_MASK;
+
+ if (num_substates <= 1)
+ return 0;
+
+ substate_choice = ((power_policy + (power_policy + 1) *
+ (num_substates - 1)) / 16);
+
+ return substate_choice;
+}
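To see how the formula spreads the power_policy range over the available substates, here is a quick user-space check (illustrative only; demo_substate simply repeats the arithmetic above). With four substates it yields 0 for policy 0, 1 for the default policy 7, and 3 (the deepest) for policy 15.

#include <stdio.h>

static unsigned int demo_substate(unsigned int power_policy,
				  unsigned int num_substates)
{
	if (num_substates <= 1)
		return 0;
	return (power_policy + (power_policy + 1) * (num_substates - 1)) / 16;
}

int main(void)
{
	printf("policy 0  -> substate %u\n", demo_substate(0, 4));	/* 0 */
	printf("policy 7  -> substate %u\n", demo_substate(7, 4));	/* 1 */
	printf("policy 15 -> substate %u\n", demo_substate(15, 4));	/* 3 */
	return 0;
}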
+
+/*
+ * choose_zero_substate()
+ */
+static int choose_zero_substate(int cstate)
+{
+ return 0;
+}
+
+/**
+ * intel_idle - enter an idle state using MONITOR/MWAIT
+ * @dev: cpuidle_device
+ * @state: cpuidle state
+ *
+ * Returns the measured residency in the requested state, in microseconds.
+ */
+static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
+{
+ unsigned long ecx = 1; /* break on interrupt flag */
+ unsigned long eax = (unsigned long)cpuidle_get_statedata(state);
+ unsigned int cstate;
+ ktime_t kt_before, kt_after;
+ s64 usec_delta;
+ int cpu = smp_processor_id();
+
+ cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
+
+ eax = eax + (choose_substate)(cstate);
+
+ local_irq_disable();
+
+ if (!(lapic_timer_reliable_states & (1 << (cstate))))
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+
+ kt_before = ktime_get_real();
+
+ stop_critical_timings();
+#ifndef MODULE
+ trace_power_start(POWER_CSTATE, (eax >> 4) + 1);
+#endif
+ if (!need_resched()) {
+
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ smp_mb();
+ if (!need_resched())
+ __mwait(eax, ecx);
+ }
+
+ start_critical_timings();
+
+ kt_after = ktime_get_real();
+ usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before));
+
+ local_irq_enable();
+
+ if (!(lapic_timer_reliable_states & (1 << (cstate))))
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+
+ return usec_delta;
+}
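The hint passed to __mwait() packs the target state into the upper nibble and the substate into the lower nibble; intel_idle() recovers the C-state index by shifting and adding one so that index 0 can stay the dummy C0 slot. A short stand-alone decode of the hints used by the tables above (illustrative only, reusing the same mask macros):

#include <stdio.h>

#define MWAIT_SUBSTATE_MASK	(0xf)
#define MWAIT_CSTATE_MASK	(0xf)
#define MWAIT_SUBSTATE_SIZE	(4)

int main(void)
{
	unsigned long hints[] = { 0x00, 0x10, 0x20, 0x30, 0x40 };
	unsigned int i;

	for (i = 0; i < sizeof(hints) / sizeof(hints[0]); i++) {
		unsigned long eax = hints[i];
		unsigned int cstate =
			(((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
		unsigned int substate = eax & MWAIT_SUBSTATE_MASK;

		printf("hint 0x%02lx -> cstate index %u, substate %u\n",
		       eax, cstate, substate);
	}
	return 0;
}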
+
+/*
+ * intel_idle_probe()
+ */
+static int intel_idle_probe(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ if (max_cstate == 0) {
+ pr_debug(PREFIX "disabled\n");
+ return -EPERM;
+ }
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return -ENODEV;
+
+ if (!boot_cpu_has(X86_FEATURE_MWAIT))
+ return -ENODEV;
+
+ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return -ENODEV;
+
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+ return -ENODEV;
+#ifdef DEBUG
+ if (substates == 0) /* can over-ride via modparam */
+#endif
+ substates = edx;
+
+ pr_debug(PREFIX "MWAIT substates: 0x%x\n", substates);
+
+ if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
+ lapic_timer_reliable_states = 0xFFFFFFFF;
+
+ if (boot_cpu_data.x86 != 6) /* family 6 */
+ return -ENODEV;
+
+ switch (boot_cpu_data.x86_model) {
+
+ case 0x1A: /* Core i7, Xeon 5500 series */
+ case 0x1E: /* Core i7 and i5 Processor - Lynnfield Jasper Forest */
+ case 0x1F: /* Core i7 and i5 Processor - Nehalem */
+ case 0x2E: /* Nehalem-EX Xeon */
+ lapic_timer_reliable_states = (1 << 1); /* C1 */
+
+ case 0x25: /* Westmere */
+ case 0x2C: /* Westmere */
+ cpuidle_state_table = nehalem_cstates;
+ choose_substate = choose_tunable_substate;
+ break;
+
+ case 0x1C: /* 28 - Atom Processor */
+ lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
+ cpuidle_state_table = atom_cstates;
+ choose_substate = choose_zero_substate;
+ break;
+#ifdef FUTURE_USE
+ case 0x17: /* 23 - Core 2 Duo */
+ lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
+#endif
+
+ default:
+ pr_debug(PREFIX "does not run on family %d model %d\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ return -ENODEV;
+ }
+
+ pr_debug(PREFIX "v" INTEL_IDLE_VERSION
+ " model 0x%X\n", boot_cpu_data.x86_model);
+
+ pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
+ lapic_timer_reliable_states);
+ return 0;
+}
+
+/*
+ * intel_idle_cpuidle_devices_uninit()
+ * unregister, free cpuidle_devices
+ */
+static void intel_idle_cpuidle_devices_uninit(void)
+{
+ int i;
+ struct cpuidle_device *dev;
+
+ for_each_online_cpu(i) {
+ dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+ cpuidle_unregister_device(dev);
+ }
+
+ free_percpu(intel_idle_cpuidle_devices);
+ return;
+}
+/*
+ * intel_idle_cpuidle_devices_init()
+ * allocate, initialize, register cpuidle_devices
+ */
+static int intel_idle_cpuidle_devices_init(void)
+{
+ int i, cstate;
+ struct cpuidle_device *dev;
+
+ intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+ if (intel_idle_cpuidle_devices == NULL)
+ return -ENOMEM;
+
+ for_each_online_cpu(i) {
+ dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+
+ dev->state_count = 1;
+
+ for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
+ int num_substates;
+
+ if (cstate > max_cstate) {
+ printk(PREFIX "max_cstate %d reached\n",
+ max_cstate);
+ break;
+ }
+
+ /* does the state exist in CPUID.MWAIT? */
+ num_substates = (substates >> ((cstate) * 4))
+ & MWAIT_SUBSTATE_MASK;
+ if (num_substates == 0)
+ continue;
+ /* is the state not enabled? */
+ if (cpuidle_state_table[cstate].enter == NULL) {
+ /* does the driver not know about the state? */
+ if (*cpuidle_state_table[cstate].name == '\0')
+ pr_debug(PREFIX "unaware of model 0x%x"
+ " MWAIT %d please"
+ " contact lenb@kernel.org",
+ boot_cpu_data.x86_model, cstate);
+ continue;
+ }
+
+ if ((cstate > 2) &&
+ !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ mark_tsc_unstable("TSC halts in idle"
+ " states deeper than C2");
+
+ dev->states[dev->state_count] = /* structure copy */
+ cpuidle_state_table[cstate];
+
+ dev->state_count += 1;
+ }
+
+ dev->cpu = i;
+ if (cpuidle_register_device(dev)) {
+ pr_debug(PREFIX "cpuidle_register_device %d failed!\n",
+ i);
+ intel_idle_cpuidle_devices_uninit();
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+
+static int __init intel_idle_init(void)
+{
+ int retval;
+
+ retval = intel_idle_probe();
+ if (retval)
+ return retval;
+
+ retval = cpuidle_register_driver(&intel_idle_driver);
+ if (retval) {
+ printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
+ cpuidle_get_driver()->name);
+ return retval;
+ }
+
+ retval = intel_idle_cpuidle_devices_init();
+ if (retval) {
+ cpuidle_unregister_driver(&intel_idle_driver);
+ return retval;
+ }
+
+ return 0;
+}
+
+static void __exit intel_idle_exit(void)
+{
+ intel_idle_cpuidle_devices_uninit();
+ cpuidle_unregister_driver(&intel_idle_driver);
+
+ return;
+}
+
+module_init(intel_idle_init);
+module_exit(intel_idle_exit);
+
+module_param(power_policy, int, 0644);
+module_param(max_cstate, int, 0444);
+#ifdef DEBUG
+module_param(substates, int, 0444);
+#endif
+
+MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
+MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 9fd4a0d3206e..adaefabc40e9 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -1824,7 +1824,7 @@ static int dv1394_open(struct inode *inode, struct file *file)
"and will not be available in the new firewire driver stack. "
"Try libraw1394 based programs instead.\n", current->comm);
- return 0;
+ return nonseekable_open(inode, file);
}
@@ -2153,17 +2153,18 @@ static struct cdev dv1394_cdev;
static const struct file_operations dv1394_fops=
{
.owner = THIS_MODULE,
- .poll = dv1394_poll,
+ .poll = dv1394_poll,
.unlocked_ioctl = dv1394_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = dv1394_compat_ioctl,
#endif
.mmap = dv1394_mmap,
.open = dv1394_open,
- .write = dv1394_write,
- .read = dv1394_read,
+ .write = dv1394_write,
+ .read = dv1394_read,
.release = dv1394_release,
- .fasync = dv1394_fasync,
+ .fasync = dv1394_fasync,
+ .llseek = no_llseek,
};
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 8aa56ac07e29..b563d5e9fa2e 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -2834,7 +2834,7 @@ static int raw1394_open(struct inode *inode, struct file *file)
file->private_data = fi;
- return 0;
+ return nonseekable_open(inode, file);
}
static int raw1394_release(struct inode *inode, struct file *file)
@@ -3035,6 +3035,7 @@ static const struct file_operations raw1394_fops = {
.poll = raw1394_poll,
.open = raw1394_open,
.release = raw1394_release,
+ .llseek = no_llseek,
};
static int __init init_raw1394(void)
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 949064a05675..a42bd6893bcf 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1239,7 +1239,7 @@ static int video1394_open(struct inode *inode, struct file *file)
ctx->current_ctx = NULL;
file->private_data = ctx;
- return 0;
+ return nonseekable_open(inode, file);
}
static int video1394_release(struct inode *inode, struct file *file)
@@ -1287,7 +1287,8 @@ static const struct file_operations video1394_fops=
.poll = video1394_poll,
.mmap = video1394_mmap,
.open = video1394_open,
- .release = video1394_release
+ .release = video1394_release,
+ .llseek = no_llseek,
};
/*** HOTPLUG STUFF **********************************************************/
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 330d2a423362..89d70de5e235 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -43,6 +43,7 @@ config INFINIBAND_ADDR_TRANS
source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/ipath/Kconfig"
+source "drivers/infiniband/hw/qib/Kconfig"
source "drivers/infiniband/hw/ehca/Kconfig"
source "drivers/infiniband/hw/amso1100/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index 0c4e589d746e..9cc7a47d3e67 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_INFINIBAND) += core/
obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/
obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
+obj-$(CONFIG_INFINIBAND_QIB) += hw/qib/
obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 05ac36e6acdb..a565af5c2d2e 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -38,7 +38,9 @@
#include <rdma/ib_verbs.h>
-int ib_device_register_sysfs(struct ib_device *device);
+int ib_device_register_sysfs(struct ib_device *device,
+ int (*port_callback)(struct ib_device *,
+ u8, struct kobject *));
void ib_device_unregister_sysfs(struct ib_device *device);
int ib_sysfs_setup(void);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index d1fba4153332..a19effad0811 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -267,7 +267,9 @@ out:
* callback for each device that is added. @device must be allocated
* with ib_alloc_device().
*/
-int ib_register_device(struct ib_device *device)
+int ib_register_device(struct ib_device *device,
+ int (*port_callback)(struct ib_device *,
+ u8, struct kobject *))
{
int ret;
@@ -296,7 +298,7 @@ int ib_register_device(struct ib_device *device)
goto out;
}
- ret = ib_device_register_sysfs(device);
+ ret = ib_device_register_sysfs(device, port_callback);
if (ret) {
printk(KERN_WARNING "Couldn't register device %s with driver model\n",
device->name);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 6dc7b77d5d29..ef1304f151dc 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -47,8 +47,8 @@ MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
-int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
-int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
+static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
+static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index f901957abc8b..3627300e2a10 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -475,7 +475,9 @@ err:
return NULL;
}
-static int add_port(struct ib_device *device, int port_num)
+static int add_port(struct ib_device *device, int port_num,
+ int (*port_callback)(struct ib_device *,
+ u8, struct kobject *))
{
struct ib_port *p;
struct ib_port_attr attr;
@@ -522,11 +524,20 @@ static int add_port(struct ib_device *device, int port_num)
if (ret)
goto err_free_pkey;
+ if (port_callback) {
+ ret = port_callback(device, port_num, &p->kobj);
+ if (ret)
+ goto err_remove_pkey;
+ }
+
list_add_tail(&p->kobj.entry, &device->port_list);
kobject_uevent(&p->kobj, KOBJ_ADD);
return 0;
+err_remove_pkey:
+ sysfs_remove_group(&p->kobj, &p->pkey_group);
+
err_free_pkey:
for (i = 0; i < attr.pkey_tbl_len; ++i)
kfree(p->pkey_group.attrs[i]);
@@ -754,7 +765,9 @@ static struct attribute_group iw_stats_group = {
.attrs = iw_proto_stats_attrs,
};
-int ib_device_register_sysfs(struct ib_device *device)
+int ib_device_register_sysfs(struct ib_device *device,
+ int (*port_callback)(struct ib_device *,
+ u8, struct kobject *))
{
struct device *class_dev = &device->dev;
int ret;
@@ -785,12 +798,12 @@ int ib_device_register_sysfs(struct ib_device *device)
}
if (device->node_type == RDMA_NODE_IB_SWITCH) {
- ret = add_port(device, 0);
+ ret = add_port(device, 0, port_callback);
if (ret)
goto err_put;
} else {
for (i = 1; i <= device->phys_port_cnt; ++i) {
- ret = add_port(device, i);
+ ret = add_port(device, i, port_callback);
if (ret)
goto err_put;
}
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 46474842cfe9..08f948df8fa9 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -706,14 +706,9 @@ static int ib_ucm_alloc_data(const void **dest, u64 src, u32 len)
if (!len)
return 0;
- data = kmalloc(len, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- if (copy_from_user(data, (void __user *)(unsigned long)src, len)) {
- kfree(data);
- return -EFAULT;
- }
+ data = memdup_user((void __user *)(unsigned long)src, len);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
*dest = data;
return 0;
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index c47f618d12e8..aeebc4d37e33 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -865,7 +865,7 @@ int c2_register_device(struct c2_dev *dev)
dev->ibdev.iwcm->create_listen = c2_service_create;
dev->ibdev.iwcm->destroy_listen = c2_service_destroy;
- ret = ib_register_device(&dev->ibdev);
+ ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
goto out_free_iwcm;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 19b1c4a62a23..fca0b4b747e4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1428,7 +1428,7 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
dev->ibdev.iwcm->get_qp = iwch_get_qp;
- ret = ib_register_device(&dev->ibdev);
+ ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
goto bail1;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index fb1aafcc294f..2447f5295482 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -373,6 +373,7 @@ static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
V_CQE_SWCQE(SW_CQE(hw_cqe)) |
V_CQE_OPCODE(FW_RI_READ_REQ) |
V_CQE_TYPE(1));
+ read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}
/*
@@ -780,6 +781,9 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
/* account for the status page. */
entries++;
+ /* IQ needs one extra entry to differentiate full vs empty. */
+ entries++;
+
/*
* entries must be multiple of 16 for HW.
*/
@@ -801,7 +805,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
chp->rhp = rhp;
chp->cq.size--; /* status page */
- chp->ibcq.cqe = chp->cq.size;
+ chp->ibcq.cqe = chp->cq.size - 1;
spin_lock_init(&chp->lock);
atomic_set(&chp->refcnt, 1);
init_waitqueue_head(&chp->wait);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index be23b5eab13b..d870f9c17c1e 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -306,7 +306,8 @@ static void c4iw_remove(struct c4iw_dev *dev)
PDBG("%s c4iw_dev %p\n", __func__, dev);
cancel_delayed_work_sync(&dev->db_drop_task);
list_del(&dev->entry);
- c4iw_unregister_device(dev);
+ if (dev->registered)
+ c4iw_unregister_device(dev);
c4iw_rdev_close(&dev->rdev);
idr_destroy(&dev->cqidr);
idr_destroy(&dev->qpidr);
@@ -343,12 +344,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
list_add_tail(&devp->entry, &dev_list);
mutex_unlock(&dev_mutex);
- if (c4iw_register_device(devp)) {
- printk(KERN_ERR MOD "Unable to register device\n");
- mutex_lock(&dev_mutex);
- c4iw_remove(devp);
- mutex_unlock(&dev_mutex);
- }
if (c4iw_debugfs_root) {
devp->debugfs_root = debugfs_create_dir(
pci_name(devp->rdev.lldi.pdev),
@@ -379,9 +374,6 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
for (i = 0; i < dev->rdev.lldi.nrxq; i++)
PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
-
- printk(KERN_INFO MOD "Initialized device %s\n",
- pci_name(dev->rdev.lldi.pdev));
out:
return dev;
}
@@ -471,7 +463,41 @@ nomem:
static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
+ struct c4iw_dev *dev = handle;
+
PDBG("%s new_state %u\n", __func__, new_state);
+ switch (new_state) {
+ case CXGB4_STATE_UP:
+ printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev));
+ if (!dev->registered) {
+ int ret;
+ ret = c4iw_register_device(dev);
+ if (ret)
+ printk(KERN_ERR MOD
+ "%s: RDMA registration failed: %d\n",
+ pci_name(dev->rdev.lldi.pdev), ret);
+ }
+ break;
+ case CXGB4_STATE_DOWN:
+ printk(KERN_INFO MOD "%s: Down\n",
+ pci_name(dev->rdev.lldi.pdev));
+ if (dev->registered)
+ c4iw_unregister_device(dev);
+ break;
+ case CXGB4_STATE_START_RECOVERY:
+ printk(KERN_INFO MOD "%s: Fatal Error\n",
+ pci_name(dev->rdev.lldi.pdev));
+ if (dev->registered)
+ c4iw_unregister_device(dev);
+ break;
+ case CXGB4_STATE_DETACH:
+ printk(KERN_INFO MOD "%s: Detach\n",
+ pci_name(dev->rdev.lldi.pdev));
+ mutex_lock(&dev_mutex);
+ c4iw_remove(dev);
+ mutex_unlock(&dev_mutex);
+ break;
+ }
return 0;
}
@@ -504,14 +530,12 @@ static void __exit c4iw_exit_module(void)
{
struct c4iw_dev *dev, *tmp;
- cxgb4_unregister_uld(CXGB4_ULD_RDMA);
-
mutex_lock(&dev_mutex);
list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
c4iw_remove(dev);
}
mutex_unlock(&dev_mutex);
-
+ cxgb4_unregister_uld(CXGB4_ULD_RDMA);
c4iw_cm_term();
debugfs_remove_recursive(c4iw_debugfs_root);
}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index a6269981e815..277ab589b44d 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -152,6 +152,7 @@ struct c4iw_dev {
struct list_head entry;
struct delayed_work db_drop_task;
struct dentry *debugfs_root;
+ u8 registered;
};
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index e54ff6d25691..7f94da1a2437 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -712,8 +712,10 @@ struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
php = to_c4iw_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
+ if (!mhp) {
+ ret = -ENOMEM;
goto err;
+ }
mhp->rhp = rhp;
ret = alloc_pbl(mhp, pbl_depth);
@@ -730,8 +732,10 @@ struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
mhp->attr.state = 1;
mmid = (stag) >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+ if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+ ret = -ENOMEM;
goto err3;
+ }
PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
return &(mhp->ibmr);
@@ -755,9 +759,6 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
dma_addr_t dma_addr;
int size = sizeof *c4pl + page_list_len * sizeof(u64);
- if (page_list_len > T4_MAX_FR_DEPTH)
- return ERR_PTR(-EINVAL);
-
c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
&dma_addr, GFP_KERNEL);
if (!c4pl)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index dfc49020bb9c..8f645c83a125 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -486,7 +486,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
dev->ibdev.iwcm->get_qp = c4iw_get_qp;
- ret = ib_register_device(&dev->ibdev);
+ ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
goto bail1;
@@ -496,6 +496,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
if (ret)
goto bail2;
}
+ dev->registered = 1;
return 0;
bail2:
ib_unregister_device(&dev->ibdev);
@@ -514,5 +515,6 @@ void c4iw_unregister_device(struct c4iw_dev *dev)
c4iw_class_attributes[i]);
ib_unregister_device(&dev->ibdev);
kfree(dev->ibdev.iwcm);
+ dev->registered = 0;
return;
}
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index b321835dcf6e..646a2a5711f2 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -572,9 +572,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
err = build_rdma_write(wqe, wr, &len16);
break;
case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_READ_WITH_INV:
fw_opcode = FW_RI_RDMA_READ_WR;
swsqe->opcode = FW_RI_READ_REQ;
- fw_flags = 0;
+ if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
+ fw_flags |= FW_RI_RDMA_READ_INVALIDATE;
+ else
+ fw_flags = 0;
err = build_rdma_read(wqe, wr, &len16);
if (err)
break;
@@ -588,6 +592,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
err = build_fastreg(wqe, wr, &len16);
break;
case IB_WR_LOCAL_INV:
+ if (wr->send_flags & IB_SEND_FENCE)
+ fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
fw_opcode = FW_RI_INV_LSTAG_WR;
swsqe->opcode = FW_RI_LOCAL_INV;
err = build_inv_stag(wqe, wr, &len16);
@@ -1339,7 +1345,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
wait_event(qhp->wait, !qhp->ep);
remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
- remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
atomic_dec(&qhp->refcnt);
wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
@@ -1442,30 +1447,26 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
if (ret)
goto err2;
- ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.rq.qid);
- if (ret)
- goto err3;
-
if (udata) {
mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
if (!mm1) {
ret = -ENOMEM;
- goto err4;
+ goto err3;
}
mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
if (!mm2) {
ret = -ENOMEM;
- goto err5;
+ goto err4;
}
mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
if (!mm3) {
ret = -ENOMEM;
- goto err6;
+ goto err5;
}
mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
if (!mm4) {
ret = -ENOMEM;
- goto err7;
+ goto err6;
}
uresp.qid_mask = rhp->rdev.qpmask;
@@ -1487,7 +1488,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
spin_unlock(&ucontext->mmap_lock);
ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
if (ret)
- goto err8;
+ goto err7;
mm1->key = uresp.sq_key;
mm1->addr = virt_to_phys(qhp->wq.sq.queue);
mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
@@ -1511,16 +1512,14 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
__func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
qhp->wq.sq.qid);
return &qhp->ibqp;
-err8:
- kfree(mm4);
err7:
- kfree(mm3);
+ kfree(mm4);
err6:
- kfree(mm2);
+ kfree(mm3);
err5:
- kfree(mm1);
+ kfree(mm2);
err4:
- remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
+ kfree(mm1);
err3:
remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index d0e8af352408..1057cb96302e 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -41,11 +41,13 @@
#define T4_MAX_NUM_QP (1<<16)
#define T4_MAX_NUM_CQ (1<<15)
#define T4_MAX_NUM_PD (1<<15)
-#define T4_MAX_PBL_SIZE 256
-#define T4_MAX_RQ_SIZE 1024
-#define T4_MAX_SQ_SIZE 1024
-#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE-1)
-#define T4_MAX_CQ_DEPTH 8192
+#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
+#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
+#define T4_MAX_IQ_SIZE (65520 - 1)
+#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES)
+#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1)
+#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
+#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
#define T4_MAX_NUM_STAG (1<<15)
#define T4_MAX_MR_SIZE (~0ULL - 1)
#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
@@ -79,12 +81,11 @@ struct t4_status_page {
sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
sizeof(struct fw_ri_immd)))
-#define T4_MAX_FR_DEPTH 255
+#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
#define T4_RQ_NUM_SLOTS 2
#define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
-#define T4_MAX_RECV_SGE ((T4_RQ_NUM_BYTES - sizeof(struct fw_ri_recv_wr) - \
- sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
+#define T4_MAX_RECV_SGE 4
union t4_wr {
struct fw_ri_res_wr res;
@@ -434,7 +435,7 @@ struct t4_cq {
struct c4iw_rdev *rdev;
u64 ugts;
size_t memsize;
- u64 timestamp;
+ __be64 bits_type_ts;
u32 cqid;
u16 size; /* including status page */
u16 cidx;
@@ -449,25 +450,17 @@ struct t4_cq {
static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
u32 val;
- u16 inc;
-
- do {
- /*
- * inc must be less the both the max update value -and-
- * the size of the CQ.
- */
- inc = cq->cidx_inc <= CIDXINC_MASK ? cq->cidx_inc :
- CIDXINC_MASK;
- inc = inc <= (cq->size - 1) ? inc : (cq->size - 1);
- if (inc == cq->cidx_inc)
- val = SEINTARM(se) | CIDXINC(inc) | TIMERREG(6) |
- INGRESSQID(cq->cqid);
- else
- val = SEINTARM(0) | CIDXINC(inc) | TIMERREG(7) |
- INGRESSQID(cq->cqid);
- cq->cidx_inc -= inc;
+
+ while (cq->cidx_inc > CIDXINC_MASK) {
+ val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
+ INGRESSQID(cq->cqid);
writel(val, cq->gts);
- } while (cq->cidx_inc);
+ cq->cidx_inc -= CIDXINC_MASK;
+ }
+ val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
+ INGRESSQID(cq->cqid);
+ writel(val, cq->gts);
+ cq->cidx_inc = 0;
return 0;
}
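The reworked arm path above pushes any backlog larger than the doorbell's increment field in unarmed, mask-sized chunks and only arms the CQ on the final write. A user-space model of that loop follows; the 0x7ff field width is an assumption made for the demo and the printf calls stand in for the GTS doorbell writes.

#include <stdio.h>

#define DEMO_CIDXINC_MASK 0x7ff	/* assumed doorbell field width, demo only */

static void demo_arm_cq(unsigned int cidx_inc, int se)
{
	while (cidx_inc > DEMO_CIDXINC_MASK) {
		printf("doorbell: inc=%u arm=0\n", DEMO_CIDXINC_MASK);
		cidx_inc -= DEMO_CIDXINC_MASK;
	}
	printf("doorbell: inc=%u arm=%d\n", cidx_inc, se);
}

int main(void)
{
	demo_arm_cq(5000, 1);	/* two unarmed writes, then one armed write */
	return 0;
}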
@@ -487,7 +480,9 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
static inline void t4_hwcq_consume(struct t4_cq *cq)
{
- cq->cidx_inc++;
+ cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
+ if (++cq->cidx_inc == cq->size)
+ cq->cidx_inc = 0;
if (++cq->cidx == cq->size) {
cq->cidx = 0;
cq->gen ^= 1;
@@ -501,20 +496,23 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
- int ret = 0;
- u64 bits_type_ts = be64_to_cpu(cq->queue[cq->cidx].bits_type_ts);
+ int ret;
+ u16 prev_cidx;
- if (G_CQE_GENBIT(bits_type_ts) == cq->gen) {
- *cqe = &cq->queue[cq->cidx];
- cq->timestamp = G_CQE_TS(bits_type_ts);
- } else if (G_CQE_TS(bits_type_ts) > cq->timestamp)
- ret = -EOVERFLOW;
+ if (cq->cidx == 0)
+ prev_cidx = cq->size - 1;
else
- ret = -ENODATA;
- if (ret == -EOVERFLOW) {
- printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+ prev_cidx = cq->cidx - 1;
+
+ if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
+ ret = -EOVERFLOW;
cq->error = 1;
- }
+ printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+ } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
+ *cqe = &cq->queue[cq->cidx];
+ ret = 0;
+ } else
+ ret = -ENODATA;
return ret;
}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 07cae552cafb..e571e60ecb88 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -847,7 +847,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
if (!create_comp_task(pool, cpu)) {
ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
- return NOTIFY_BAD;
+ return notifier_from_errno(-ENOMEM);
}
break;
case CPU_UP_CANCELED:
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 129a6bebd6e3..ecb51b396c42 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -291,8 +291,9 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
};
ehca_gen_dbg("Probing adapter %s...",
- shca->ofdev->node->full_name);
- loc_code = of_get_property(shca->ofdev->node, "ibm,loc-code", NULL);
+ shca->ofdev->dev.of_node->full_name);
+ loc_code = of_get_property(shca->ofdev->dev.of_node, "ibm,loc-code",
+ NULL);
if (loc_code)
ehca_gen_dbg(" ... location lode=%s", loc_code);
@@ -720,16 +721,16 @@ static int __devinit ehca_probe(struct of_device *dev,
int ret, i, eq_size;
unsigned long flags;
- handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
+ handle = of_get_property(dev->dev.of_node, "ibm,hca-handle", NULL);
if (!handle) {
ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
- dev->node->full_name);
+ dev->dev.of_node->full_name);
return -ENODEV;
}
if (!(*handle)) {
ehca_gen_err("Wrong eHCA handle for adapter: %s.",
- dev->node->full_name);
+ dev->dev.of_node->full_name);
return -ENODEV;
}
@@ -798,7 +799,7 @@ static int __devinit ehca_probe(struct of_device *dev,
goto probe5;
}
- ret = ib_register_device(&shca->ib_device);
+ ret = ib_register_device(&shca->ib_device, NULL);
if (ret) {
ehca_err(&shca->ib_device,
"ib_register_device() failed ret=%i", ret);
@@ -936,12 +937,13 @@ static struct of_device_id ehca_device_table[] =
MODULE_DEVICE_TABLE(of, ehca_device_table);
static struct of_platform_driver ehca_driver = {
- .name = "ehca",
- .match_table = ehca_device_table,
.probe = ehca_probe,
.remove = ehca_remove,
- .driver = {
+ .driver = {
+ .name = "ehca",
+ .owner = THIS_MODULE,
.groups = ehca_drv_attr_groups,
+ .of_match_table = ehca_device_table,
},
};
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig
index 3c7968f25ec2..1d9bb115cbf6 100644
--- a/drivers/infiniband/hw/ipath/Kconfig
+++ b/drivers/infiniband/hw/ipath/Kconfig
@@ -1,9 +1,11 @@
config INFINIBAND_IPATH
- tristate "QLogic InfiniPath Driver"
- depends on 64BIT && NET
+ tristate "QLogic HTX HCA support"
+ depends on 64BIT && NET && HT_IRQ
---help---
- This is a driver for QLogic InfiniPath host channel adapters,
+ This is a driver for the obsolete QLogic Hyper-Transport
+ IB host channel adapter (model QHT7140),
including InfiniBand verbs support. This driver allows these
devices to be used with both kernel upper level protocols such
as IP-over-InfiniBand as well as with userspace applications
(in conjunction with InfiniBand userspace access).
+ For QLogic PCIe QLE based cards, use the QIB driver instead.
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index bf9450061986..fa3df82681df 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -29,13 +29,9 @@ ib_ipath-y := \
ipath_user_pages.o \
ipath_user_sdma.o \
ipath_verbs_mcast.o \
- ipath_verbs.o \
- ipath_iba7220.o \
- ipath_sd7220.o \
- ipath_sd7220_img.o
+ ipath_verbs.o
ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
-ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o
ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 6302626d17f0..21337468c652 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -132,18 +132,13 @@ static int __devinit ipath_init_one(struct pci_dev *,
/* Only needed for registration, nothing else needs this info */
#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
-#define PCI_VENDOR_ID_QLOGIC 0x1077
#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
-#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
-#define PCI_DEVICE_ID_INFINIPATH_7220 0x7220
/* Number of seconds before our card status check... */
#define STATUS_TIMEOUT 60
static const struct pci_device_id ipath_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
- { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
- { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_INFINIPATH_7220) },
{ 0, }
};
@@ -521,30 +516,9 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
/* setup the chip-specific functions, as early as possible. */
switch (ent->device) {
case PCI_DEVICE_ID_INFINIPATH_HT:
-#ifdef CONFIG_HT_IRQ
ipath_init_iba6110_funcs(dd);
break;
-#else
- ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if "
- "CONFIG_HT_IRQ is not enabled\n", ent->device);
- return -ENODEV;
-#endif
- case PCI_DEVICE_ID_INFINIPATH_PE800:
-#ifdef CONFIG_PCI_MSI
- ipath_init_iba6120_funcs(dd);
- break;
-#else
- ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if "
- "CONFIG_PCI_MSI is not enabled\n", ent->device);
- return -ENODEV;
-#endif
- case PCI_DEVICE_ID_INFINIPATH_7220:
-#ifndef CONFIG_PCI_MSI
- ipath_dbg("CONFIG_PCI_MSI is not enabled, "
- "using INTx for unit %u\n", dd->ipath_unit);
-#endif
- ipath_init_iba7220_funcs(dd);
- break;
+
default:
ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
"failing\n", ent->device);
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
deleted file mode 100644
index 4b4a30b0dabd..000000000000
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ /dev/null
@@ -1,1862 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the
- * InfiniPath PCIe chip.
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-
-static void ipath_setup_pe_setextled(struct ipath_devdata *, u64, u64);
-
-/*
- * This file contains all the chip-specific register information and
- * access functions for the QLogic InfiniPath PCI-Express chip.
- *
- * This lists the InfiniPath registers, in the actual chip layout.
- * This structure should never be directly accessed.
- */
-struct _infinipath_do_not_use_kernel_regs {
- unsigned long long Revision;
- unsigned long long Control;
- unsigned long long PageAlign;
- unsigned long long PortCnt;
- unsigned long long DebugPortSelect;
- unsigned long long Reserved0;
- unsigned long long SendRegBase;
- unsigned long long UserRegBase;
- unsigned long long CounterRegBase;
- unsigned long long Scratch;
- unsigned long long Reserved1;
- unsigned long long Reserved2;
- unsigned long long IntBlocked;
- unsigned long long IntMask;
- unsigned long long IntStatus;
- unsigned long long IntClear;
- unsigned long long ErrorMask;
- unsigned long long ErrorStatus;
- unsigned long long ErrorClear;
- unsigned long long HwErrMask;
- unsigned long long HwErrStatus;
- unsigned long long HwErrClear;
- unsigned long long HwDiagCtrl;
- unsigned long long MDIO;
- unsigned long long IBCStatus;
- unsigned long long IBCCtrl;
- unsigned long long ExtStatus;
- unsigned long long ExtCtrl;
- unsigned long long GPIOOut;
- unsigned long long GPIOMask;
- unsigned long long GPIOStatus;
- unsigned long long GPIOClear;
- unsigned long long RcvCtrl;
- unsigned long long RcvBTHQP;
- unsigned long long RcvHdrSize;
- unsigned long long RcvHdrCnt;
- unsigned long long RcvHdrEntSize;
- unsigned long long RcvTIDBase;
- unsigned long long RcvTIDCnt;
- unsigned long long RcvEgrBase;
- unsigned long long RcvEgrCnt;
- unsigned long long RcvBufBase;
- unsigned long long RcvBufSize;
- unsigned long long RxIntMemBase;
- unsigned long long RxIntMemSize;
- unsigned long long RcvPartitionKey;
- unsigned long long Reserved3;
- unsigned long long RcvPktLEDCnt;
- unsigned long long Reserved4[8];
- unsigned long long SendCtrl;
- unsigned long long SendPIOBufBase;
- unsigned long long SendPIOSize;
- unsigned long long SendPIOBufCnt;
- unsigned long long SendPIOAvailAddr;
- unsigned long long TxIntMemBase;
- unsigned long long TxIntMemSize;
- unsigned long long Reserved5;
- unsigned long long PCIeRBufTestReg0;
- unsigned long long PCIeRBufTestReg1;
- unsigned long long Reserved51[6];
- unsigned long long SendBufferError;
- unsigned long long SendBufferErrorCONT1;
- unsigned long long Reserved6SBE[6];
- unsigned long long RcvHdrAddr0;
- unsigned long long RcvHdrAddr1;
- unsigned long long RcvHdrAddr2;
- unsigned long long RcvHdrAddr3;
- unsigned long long RcvHdrAddr4;
- unsigned long long Reserved7RHA[11];
- unsigned long long RcvHdrTailAddr0;
- unsigned long long RcvHdrTailAddr1;
- unsigned long long RcvHdrTailAddr2;
- unsigned long long RcvHdrTailAddr3;
- unsigned long long RcvHdrTailAddr4;
- unsigned long long Reserved8RHTA[11];
- unsigned long long Reserved9SW[8];
- unsigned long long SerdesConfig0;
- unsigned long long SerdesConfig1;
- unsigned long long SerdesStatus;
- unsigned long long XGXSConfig;
- unsigned long long IBPLLCfg;
- unsigned long long Reserved10SW2[3];
- unsigned long long PCIEQ0SerdesConfig0;
- unsigned long long PCIEQ0SerdesConfig1;
- unsigned long long PCIEQ0SerdesStatus;
- unsigned long long Reserved11;
- unsigned long long PCIEQ1SerdesConfig0;
- unsigned long long PCIEQ1SerdesConfig1;
- unsigned long long PCIEQ1SerdesStatus;
- unsigned long long Reserved12;
-};
-
-struct _infinipath_do_not_use_counters {
- __u64 LBIntCnt;
- __u64 LBFlowStallCnt;
- __u64 Reserved1;
- __u64 TxUnsupVLErrCnt;
- __u64 TxDataPktCnt;
- __u64 TxFlowPktCnt;
- __u64 TxDwordCnt;
- __u64 TxLenErrCnt;
- __u64 TxMaxMinLenErrCnt;
- __u64 TxUnderrunCnt;
- __u64 TxFlowStallCnt;
- __u64 TxDroppedPktCnt;
- __u64 RxDroppedPktCnt;
- __u64 RxDataPktCnt;
- __u64 RxFlowPktCnt;
- __u64 RxDwordCnt;
- __u64 RxLenErrCnt;
- __u64 RxMaxMinLenErrCnt;
- __u64 RxICRCErrCnt;
- __u64 RxVCRCErrCnt;
- __u64 RxFlowCtrlErrCnt;
- __u64 RxBadFormatCnt;
- __u64 RxLinkProblemCnt;
- __u64 RxEBPCnt;
- __u64 RxLPCRCErrCnt;
- __u64 RxBufOvflCnt;
- __u64 RxTIDFullErrCnt;
- __u64 RxTIDValidErrCnt;
- __u64 RxPKeyMismatchCnt;
- __u64 RxP0HdrEgrOvflCnt;
- __u64 RxP1HdrEgrOvflCnt;
- __u64 RxP2HdrEgrOvflCnt;
- __u64 RxP3HdrEgrOvflCnt;
- __u64 RxP4HdrEgrOvflCnt;
- __u64 RxP5HdrEgrOvflCnt;
- __u64 RxP6HdrEgrOvflCnt;
- __u64 RxP7HdrEgrOvflCnt;
- __u64 RxP8HdrEgrOvflCnt;
- __u64 Reserved6;
- __u64 Reserved7;
- __u64 IBStatusChangeCnt;
- __u64 IBLinkErrRecoveryCnt;
- __u64 IBLinkDownedCnt;
- __u64 IBSymbolErrCnt;
-};
-
-#define IPATH_KREG_OFFSET(field) (offsetof( \
- struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
-#define IPATH_CREG_OFFSET(field) (offsetof( \
- struct _infinipath_do_not_use_counters, field) / sizeof(u64))
-
-static const struct ipath_kregs ipath_pe_kregs = {
- .kr_control = IPATH_KREG_OFFSET(Control),
- .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
- .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
- .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
- .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
- .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
- .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
- .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
- .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
- .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
- .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
- .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
- .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
- .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
- .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
- .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
- .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
- .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
- .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
- .kr_intclear = IPATH_KREG_OFFSET(IntClear),
- .kr_intmask = IPATH_KREG_OFFSET(IntMask),
- .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
- .kr_mdio = IPATH_KREG_OFFSET(MDIO),
- .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
- .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
- .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
- .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
- .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
- .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
- .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
- .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
- .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
- .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
- .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
- .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
- .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
- .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
- .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
- .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
- .kr_revision = IPATH_KREG_OFFSET(Revision),
- .kr_scratch = IPATH_KREG_OFFSET(Scratch),
- .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
- .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
- .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
- .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
- .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
- .kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
- .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
- .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
- .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
- .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
- .kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
- .kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
- .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
- .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
- .kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg),
-
- /*
- * These should not be used directly via ipath_write_kreg64(),
- * use them with ipath_write_kreg64_port(),
- */
- .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
- .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
-
- /* The rcvpktled register controls one of the debug port signals, so
- * a packet activity LED can be connected to it. */
- .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
- .kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
- .kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
- .kr_pcieq0serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig0),
- .kr_pcieq0serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig1),
- .kr_pcieq0serdesstatus = IPATH_KREG_OFFSET(PCIEQ0SerdesStatus),
- .kr_pcieq1serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig0),
- .kr_pcieq1serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig1),
- .kr_pcieq1serdesstatus = IPATH_KREG_OFFSET(PCIEQ1SerdesStatus)
-};
-
-static const struct ipath_cregs ipath_pe_cregs = {
- .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
- .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
- .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
- .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
- .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
- .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
- .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
- .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
- .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
- .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
- .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
- .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
- .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
- .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
- .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
- .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
- .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
- .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
- .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
- .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
- .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
- .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
- .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
- .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
- .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
- .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
- .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
- .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
- .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
- .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
- .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
- .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
- .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
-};
-
-/* kr_control bits */
-#define INFINIPATH_C_RESET 1U
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1)
-#define INFINIPATH_I_RCVURG_SHIFT 0
-#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1)
-#define INFINIPATH_I_RCVAVAIL_SHIFT 12
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
-#define INFINIPATH_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
-#define INFINIPATH_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
-#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
-#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
-#define INFINIPATH_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
-#define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
-#define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL
-
-#define IBA6120_IBCS_LINKTRAININGSTATE_MASK 0xf
-#define IBA6120_IBCS_LINKSTATE_SHIFT 4
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_FREQSEL 0x2
-#define INFINIPATH_EXTS_SERDESSEL 0x4
-#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
-#define INFINIPATH_EXTS_MEMBIST_FOUND 0x0000000000008000
-
-/* kr_xgxsconfig bits */
-#define INFINIPATH_XGXS_RESET 0x5ULL
-
-#define _IPATH_GPIO_SDA_NUM 1
-#define _IPATH_GPIO_SCL_NUM 0
-
-#define IPATH_GPIO_SDA (1ULL << \
- (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-#define IPATH_GPIO_SCL (1ULL << \
- (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-
-#define INFINIPATH_RT_BUFSIZE_MASK 0xe0000000ULL
-#define INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid) \
- ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
-#define INFINIPATH_RT_BUFSIZE(tid) (1 << INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid))
-#define INFINIPATH_RT_IS_VALID(tid) \
- (((tid) & INFINIPATH_RT_BUFSIZE_MASK) && \
- ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) != INFINIPATH_RT_BUFSIZE_MASK)))
-#define INFINIPATH_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
-#define INFINIPATH_RT_ADDR_SHIFT 10
-
-#define INFINIPATH_R_INTRAVAIL_SHIFT 16
-#define INFINIPATH_R_TAILUPD_SHIFT 31
-
-/* 6120 specific hardware errors... */
-static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
- INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
- INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"),
- /*
- * In practice, it's unlikely that we'll see PCIe PLL, or bus
- * parity or memory parity error failures, because most likely we
- * won't be able to talk to the core of the chip. Nonetheless, we
- * might see them, if they are in parts of the PCIe core that aren't
- * essential.
- */
- INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"),
- INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"),
- INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
- INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
-};
-
-#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
- INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
- << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
-#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
- << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
-
-static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *,
- u32, unsigned long);
-
-/*
- * On platforms using this chip, and not having ordered WC stores, we
- * can get TXE parity errors due to speculative reads to the PIO buffers,
- * and this, due to a chip bug can result in (many) false parity error
- * reports. So it's a debug print on those, and an info print on systems
- * where the speculative reads don't occur.
- */
-static void ipath_pe_txe_recover(struct ipath_devdata *dd)
-{
- if (ipath_unordered_wc())
- ipath_dbg("Recovering from TXE PIO parity error\n");
- else {
- ++ipath_stats.sps_txeparity;
- dev_info(&dd->pcidev->dev,
- "Recovering from TXE PIO parity error\n");
- }
-}
-
-
-/**
- * ipath_pe_handle_hwerrors - display hardware errors.
- * @dd: the infinipath device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now,
- * we'll print them and continue. We reuse the same message buffer as
- * ipath_handle_errors() to avoid excessive stack usage.
- */
-static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
- size_t msgl)
-{
- ipath_err_t hwerrs;
- u32 bits, ctrl;
- int isfatal = 0;
- char bitsmsg[64];
- int log_idx;
-
- hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
- if (!hwerrs) {
- /*
- * better than printing confusing messages
- * This seems to be related to clearing the crc error, or
- * the pll error during init.
- */
- ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
- return;
- } else if (hwerrs == ~0ULL) {
- ipath_dev_err(dd, "Read of hardware error status failed "
- "(all bits set); ignoring\n");
- return;
- }
- ipath_stats.sps_hwerrs++;
-
- /* Always clear the error status register, except MEMBISTFAIL,
- * regardless of whether we continue or stop using the chip.
- * We want that set so we know it failed, even across driver reload.
- * We'll still ignore it in the hwerrmask. We do this partly for
- * diagnostics, but also for support */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
-
- hwerrs &= dd->ipath_hwerrmask;
-
- /* We log some errors to EEPROM, check if we have any of those. */
- for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
- if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
- ipath_inc_eeprom_err(dd, log_idx, 1);
-
- /*
- * make sure we get this much out, unless told to be quiet,
- * or it's occurred within the last 5 seconds
- */
- if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY |
- RXE_EAGER_PARITY)) ||
- (ipath_debug & __IPATH_VERBDBG))
- dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
- "(cleared)\n", (unsigned long long) hwerrs);
- dd->ipath_lasthwerror |= hwerrs;
-
- if (hwerrs & ~dd->ipath_hwe_bitsextant)
- ipath_dev_err(dd, "hwerror interrupt with unknown errors "
- "%llx set\n", (unsigned long long)
- (hwerrs & ~dd->ipath_hwe_bitsextant));
-
- ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
- if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
- /*
- * parity errors in send memory are recoverable,
- * just cancel the send (if indicated in sendbuffererror),
- * count the occurrence, unfreeze (if no other handled
- * hardware error bits are set), and continue. They can
- * occur if a processor speculative read is done to the PIO
- * buffer while we are sending a packet, for example.
- */
- if (hwerrs & TXE_PIO_PARITY) {
- ipath_pe_txe_recover(dd);
- hwerrs &= ~TXE_PIO_PARITY;
- }
- if (!hwerrs) {
- static u32 freeze_cnt;
-
- freeze_cnt++;
- ipath_dbg("Clearing freezemode on ignored or recovered "
- "hardware error (%u)\n", freeze_cnt);
- ipath_clear_freeze(dd);
- }
- }
-
- *msg = '\0';
-
- if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
- strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]",
- msgl);
- /* ignore from now on, so disable until driver reloaded */
- *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
- dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- ipath_format_hwerrors(hwerrs,
- ipath_6120_hwerror_msgs,
- sizeof(ipath_6120_hwerror_msgs)/
- sizeof(ipath_6120_hwerror_msgs[0]),
- msg, msgl);
-
- if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
- << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
- bits = (u32) ((hwerrs >>
- INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
- INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
- snprintf(bitsmsg, sizeof bitsmsg,
- "[PCIe Mem Parity Errs %x] ", bits);
- strlcat(msg, bitsmsg, msgl);
- }
-
-#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
- INFINIPATH_HWE_COREPLL_RFSLIP )
-
- if (hwerrs & _IPATH_PLL_FAIL) {
- snprintf(bitsmsg, sizeof bitsmsg,
- "[PLL failed (%llx), InfiniPath hardware unusable]",
- (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
- strlcat(msg, bitsmsg, msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
- /*
- * If it occurs, it is left masked since the external
- * interface is unused
- */
- dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- if (hwerrs) {
- /*
- * if any set that we aren't ignoring; only
- * make the complaint once, in case it's stuck
- * or recurring, and we get here multiple
- * times.
- */
- ipath_dev_err(dd, "%s hardware error\n", msg);
- if (dd->ipath_flags & IPATH_INITTED) {
- ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
- ipath_setup_pe_setextled(dd,
- INFINIPATH_IBCS_L_STATE_DOWN,
- INFINIPATH_IBCS_LT_STATE_DISABLED);
- ipath_dev_err(dd, "Fatal Hardware Error (freeze "
- "mode), no longer usable, SN %.16s\n",
- dd->ipath_serial);
- isfatal = 1;
- }
- *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
- /* mark as having had error */
- *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
- /*
- * mark as not usable, at a minimum until driver
- * is reloaded, probably until reboot, since no
- * other reset is possible.
- */
- dd->ipath_flags &= ~IPATH_INITTED;
- } else
- *msg = 0; /* recovered from all of them */
-
- if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg && msg) {
- /*
- * for /sys status file ; if no trailing brace is copied,
- * we'll know it was truncated.
- */
- snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
- "{%s}", msg);
- }
-}
-
-/**
- * ipath_pe_boardname - fill in the board name
- * @dd: the infinipath device
- * @name: the output buffer
- * @namelen: the size of the output buffer
- *
- * info is based on the board revision register
- */
-static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
- size_t namelen)
-{
- char *n = NULL;
- u8 boardrev = dd->ipath_boardrev;
- int ret;
-
- switch (boardrev) {
- case 0:
- n = "InfiniPath_Emulation";
- break;
- case 1:
- n = "InfiniPath_QLE7140-Bringup";
- break;
- case 2:
- n = "InfiniPath_QLE7140";
- break;
- case 3:
- n = "InfiniPath_QMI7140";
- break;
- case 4:
- n = "InfiniPath_QEM7140";
- break;
- case 5:
- n = "InfiniPath_QMH7140";
- break;
- case 6:
- n = "InfiniPath_QLE7142";
- break;
- default:
- ipath_dev_err(dd,
- "Don't yet know about board with ID %u\n",
- boardrev);
- snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u",
- boardrev);
- break;
- }
- if (n)
- snprintf(name, namelen, "%s", n);
-
- if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev>2) {
- ipath_dev_err(dd, "Unsupported InfiniPath hardware revision %u.%u!\n",
- dd->ipath_majrev, dd->ipath_minrev);
- ret = 1;
- } else {
- ret = 0;
- if (dd->ipath_minrev >= 2)
- dd->ipath_f_put_tid = ipath_pe_put_tid_2;
- }
-
- /*
- * set here, not in ipath_init_*_funcs because we have to do
- * it after we can read chip registers.
- */
- dd->ipath_ureg_align =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
-
- return ret;
-}
-
-/**
- * ipath_pe_init_hwerrors - enable hardware errors
- * @dd: the infinipath device
- *
- * now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those errors bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
-{
- ipath_err_t val;
- u64 extsval;
-
- extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-
- if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
- ipath_dev_err(dd, "MemBIST did not complete!\n");
- if (extsval & INFINIPATH_EXTS_MEMBIST_FOUND)
- ipath_dbg("MemBIST corrected\n");
-
- val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */
-
- if (!dd->ipath_boardrev) // no PLL for Emulator
- val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-
- if (dd->ipath_minrev < 2) {
- /* workaround bug 9460 in internal interface bus parity
- * checking. Fixed (HW bug 9490) in Rev2.
- */
- val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM;
- }
- dd->ipath_hwerrmask = val;
-}
-
-/**
- * ipath_pe_bringup_serdes - bring up the serdes
- * @dd: the infinipath device
- */
-static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
-{
- u64 val, config1, prev_val;
- int ret = 0;
-
- ipath_dbg("Trying to bringup serdes\n");
-
- if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
- INFINIPATH_HWE_SERDESPLLFAILED) {
- ipath_dbg("At start, serdes PLL failed bit set "
- "in hwerrstatus, clearing and continuing\n");
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- INFINIPATH_HWE_SERDESPLLFAILED);
- }
-
- dd->ibdeltainprog = 1;
- dd->ibsymsnap =
- ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
- dd->iblnkerrsnap =
- ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
-
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
- config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
-
- ipath_cdbg(VERBOSE, "SerDes status config0=%llx config1=%llx, "
- "xgxsconfig %llx\n", (unsigned long long) val,
- (unsigned long long) config1, (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
- /*
- * Force reset on, also set rxdetect enable. Must do before reading
- * serdesstatus at least for simulation, or some of the bits in
- * serdes status will come back as undefined and cause simulation
- * failures
- */
- val |= INFINIPATH_SERDC0_RESET_PLL | INFINIPATH_SERDC0_RXDETECT_EN
- | INFINIPATH_SERDC0_L1PWR_DN;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
- /* be sure chip saw it */
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- udelay(5); /* need pll reset set at least for a bit */
- /*
- * after PLL is reset, set the per-lane Resets and TxIdle and
- * clear the PLL reset and rxdetect (to get falling edge).
- * Leave L1PWR bits set (permanently)
- */
- val &= ~(INFINIPATH_SERDC0_RXDETECT_EN | INFINIPATH_SERDC0_RESET_PLL
- | INFINIPATH_SERDC0_L1PWR_DN);
- val |= INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE;
- ipath_cdbg(VERBOSE, "Clearing pll reset and setting lane resets "
- "and txidle (%llx)\n", (unsigned long long) val);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
- /* be sure chip saw it */
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- /* need PLL reset clear for at least 11 usec before lane
- * resets cleared; give it a few more to be sure */
- udelay(15);
- val &= ~(INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE);
-
- ipath_cdbg(VERBOSE, "Clearing lane resets and txidle "
- "(writing %llx)\n", (unsigned long long) val);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
- /* be sure chip saw it */
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- prev_val = val;
- if (val & INFINIPATH_XGXS_RESET)
- val &= ~INFINIPATH_XGXS_RESET;
- if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
- INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
- /* need to compensate for Tx inversion in partner */
- val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
- INFINIPATH_XGXS_RX_POL_SHIFT);
- val |= dd->ipath_rx_pol_inv <<
- INFINIPATH_XGXS_RX_POL_SHIFT;
- }
- if (val != prev_val)
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
- /* clear current and de-emphasis bits */
- config1 &= ~0x0ffffffff00ULL;
- /* set current to 20ma */
- config1 |= 0x00000000000ULL;
- /* set de-emphasis to -5.68dB */
- config1 |= 0x0cccc000000ULL;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
-
- ipath_cdbg(VERBOSE, "done: SerDes status config0=%llx "
- "config1=%llx, sstatus=%llx xgxs=%llx\n",
- (unsigned long long) val, (unsigned long long) config1,
- (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
- (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
- return ret;
-}
-
-/**
- * ipath_pe_quiet_serdes - set serdes to txidle
- * @dd: the infinipath device
- * Called when driver is being unloaded
- */
-static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
-{
- u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
- if (dd->ibsymdelta || dd->iblnkerrdelta ||
- dd->ibdeltainprog) {
- u64 diagc;
- /* enable counter writes */
- diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl,
- diagc | INFINIPATH_DC_COUNTERWREN);
-
- if (dd->ibsymdelta || dd->ibdeltainprog) {
- val = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt);
- if (dd->ibdeltainprog)
- val -= val - dd->ibsymsnap;
- val -= dd->ibsymdelta;
- ipath_write_creg(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt, val);
- }
- if (dd->iblnkerrdelta || dd->ibdeltainprog) {
- val = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt);
- if (dd->ibdeltainprog)
- val -= val - dd->iblnkerrsnap;
- val -= dd->iblnkerrdelta;
- ipath_write_creg(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt, val);
- }
-
- /* and disable counter writes */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc);
- }
- val |= INFINIPATH_SERDC0_TXIDLE;
- ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
- (unsigned long long) val);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-}
-
-static int ipath_pe_intconfig(struct ipath_devdata *dd)
-{
- u32 chiprev;
-
- /*
- * If the chip supports added error indication via GPIO pins,
- * enable interrupts on those bits so the interrupt routine
- * can count the events. Also set flag so interrupt routine
- * can know they are expected.
- */
- chiprev = dd->ipath_revision >> INFINIPATH_R_CHIPREVMINOR_SHIFT;
- if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) {
- /* Rev2+ reports extra errors via internal GPIO pins */
- dd->ipath_flags |= IPATH_GPIO_ERRINTRS;
- dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
- dd->ipath_gpio_mask);
- }
- return 0;
-}
-
-/**
- * ipath_setup_pe_setextled - set the state of the two external LEDs
- * @dd: the infinipath device
- * @lst: the L state
- * @ltst: the LT state
-
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state),
- *
- * Note: We try to match the Mellanox HCA LED behavior as best
- * we can. Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate. That's
- * visible overhead, so not something we will do.
- *
- */
-static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
- u64 ltst)
-{
- u64 extctl;
- unsigned long flags = 0;
-
- /* the diags use the LED to indicate diag info, so we leave
- * the external LED alone when the diags are running */
- if (ipath_diag_inuse)
- return;
-
- /* Allow override of LED display for, e.g. Locating system in rack */
- if (dd->ipath_led_override) {
- ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
- ? INFINIPATH_IBCS_LT_STATE_LINKUP
- : INFINIPATH_IBCS_LT_STATE_DISABLED;
- lst = (dd->ipath_led_override & IPATH_LED_LOG)
- ? INFINIPATH_IBCS_L_STATE_ACTIVE
- : INFINIPATH_IBCS_L_STATE_DOWN;
- }
-
- spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
- extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
- INFINIPATH_EXTC_LED2PRIPORT_ON);
-
- if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
- extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
- if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
- extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
- dd->ipath_extctrl = extctl;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
- spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-}
-
-/**
- * ipath_setup_pe_cleanup - clean up any per-chip chip-specific stuff
- * @dd: the infinipath device
- *
- * This is called during driver unload.
- * We do the pci_disable_msi here, not in generic code, because it
- * isn't used for the HT chips. If we do end up needing pci_enable_msi
- * at some point in the future for HT, we'll move the call back
- * into the main init_one code.
- */
-static void ipath_setup_pe_cleanup(struct ipath_devdata *dd)
-{
- dd->ipath_msi_lo = 0; /* just in case unload fails */
- pci_disable_msi(dd->pcidev);
-}
-
-static void ipath_6120_pcie_params(struct ipath_devdata *dd)
-{
- u16 linkstat, speed;
- int pos;
-
- pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
- if (!pos) {
- ipath_dev_err(dd, "Can't find PCI Express capability!\n");
- goto bail;
- }
-
- pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
- &linkstat);
- /*
- * speed is bits 0-4, linkwidth is bits 4-8
- * no defines for them in headers
- */
- speed = linkstat & 0xf;
- linkstat >>= 4;
- linkstat &= 0x1f;
- dd->ipath_lbus_width = linkstat;
-
- switch (speed) {
- case 1:
- dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */
- break;
- case 2:
- dd->ipath_lbus_speed = 5000; /* Gen2, 5GHz */
- break;
- default: /* not defined, assume gen1 */
- dd->ipath_lbus_speed = 2500;
- break;
- }
-
- if (linkstat < 8)
- ipath_dev_err(dd,
- "PCIe width %u (x8 HCA), performance reduced\n",
- linkstat);
- else
- ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x8 HCA)\n",
- dd->ipath_lbus_speed, linkstat);
-
- if (speed != 1)
- ipath_dev_err(dd,
- "PCIe linkspeed %u is incorrect; "
- "should be 1 (2500)!\n", speed);
-bail:
- /* fill in string, even on errors */
- snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
- "PCIe,%uMHz,x%u\n",
- dd->ipath_lbus_speed,
- dd->ipath_lbus_width);
-
- return;
-}
-
-/**
- * ipath_setup_pe_config - setup PCIe config related stuff
- * @dd: the infinipath device
- * @pdev: the PCI device
- *
- * The pci_enable_msi() call will fail on systems with MSI quirks
- * such as those with AMD8131, even if the device of interest is not
- * attached to that device, (in the 2.6.13 - 2.6.15 kernels, at least, fixed
- * late in 2.6.16).
- * All that can be done is to edit the kernel source to remove the quirk
- * check until that is fixed.
- * We do not need to call enable_msi() for our HyperTransport chip,
- * even though it uses MSI, and we want to avoid the quirk warning.
- * So we call enable_msi only for PCIe. If we do end up needing
- * pci_enable_msi at some point in the future for HT, we'll move the
- * call back into the main init_one code.
- * We save the msi lo and hi values, so we can restore them after
- * chip reset (the kernel PCI infrastructure doesn't yet handle that
- * correctly).
- */
-static int ipath_setup_pe_config(struct ipath_devdata *dd,
- struct pci_dev *pdev)
-{
- int pos, ret;
-
- dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
- ret = pci_enable_msi(dd->pcidev);
- if (ret)
- ipath_dev_err(dd, "pci_enable_msi failed: %d, "
- "interrupts may not work\n", ret);
- /* continue even if it fails, we may still be OK... */
- dd->ipath_irq = pdev->irq;
-
- if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
- u16 control;
- pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
- &dd->ipath_msi_lo);
- pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
- &dd->ipath_msi_hi);
- pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
- &control);
- /* now save the data (vector) info */
- pci_read_config_word(dd->pcidev,
- pos + ((control & PCI_MSI_FLAGS_64BIT)
- ? 12 : 8),
- &dd->ipath_msi_data);
- ipath_cdbg(VERBOSE, "Read msi data 0x%x from config offset "
- "0x%x, control=0x%x\n", dd->ipath_msi_data,
- pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
- control);
- /* we save the cachelinesize also, although it doesn't
- * really matter */
- pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
- &dd->ipath_pci_cacheline);
- } else
- ipath_dev_err(dd, "Can't find MSI capability, "
- "can't save MSI settings for reset\n");
-
- ipath_6120_pcie_params(dd);
-
- dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
- dd->ipath_link_speed_supported = IPATH_IB_SDR;
- dd->ipath_link_width_enabled = IB_WIDTH_4X;
- dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
- /* these can't change for this chip, so set once */
- dd->ipath_link_width_active = dd->ipath_link_width_enabled;
- dd->ipath_link_speed_active = dd->ipath_link_speed_enabled;
- return 0;
-}
-
-static void ipath_init_pe_variables(struct ipath_devdata *dd)
-{
- /*
- * setup the register offsets, since they are different for each
- * chip
- */
- dd->ipath_kregs = &ipath_pe_kregs;
- dd->ipath_cregs = &ipath_pe_cregs;
-
- /*
- * bits for selecting i2c direction and values,
- * used for I2C serial flash
- */
- dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
- dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
- dd->ipath_gpio_sda = IPATH_GPIO_SDA;
- dd->ipath_gpio_scl = IPATH_GPIO_SCL;
-
- /*
- * Fill in data for field-values that change in newer chips.
- * We dynamically specify only the mask for LINKTRAININGSTATE
- * and only the shift for LINKSTATE, as they are the only ones
- * that change. Also precalculate the 3 link states of interest
- * and the combined mask.
- */
- dd->ibcs_ls_shift = IBA6120_IBCS_LINKSTATE_SHIFT;
- dd->ibcs_lts_mask = IBA6120_IBCS_LINKTRAININGSTATE_MASK;
- dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
- dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
- dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
- dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
- dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
-
- /*
- * Fill in data for ibcc field-values that change in newer chips.
- * We dynamically specify only the mask for LINKINITCMD
- * and only the shift for LINKCMD and MAXPKTLEN, as they are
- * the only ones that change.
- */
- dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK;
- dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT;
- dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
-
- /* Fill in shifts for RcvCtrl. */
- dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
- dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT;
- dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT;
- dd->ipath_r_portcfg_shift = 0; /* Not on IBA6120 */
-
- /* variables for sanity checking interrupt and errors */
- dd->ipath_hwe_bitsextant =
- (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
- (INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
- (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
- INFINIPATH_HWE_PCIE1PLLFAILED |
- INFINIPATH_HWE_PCIE0PLLFAILED |
- INFINIPATH_HWE_PCIEPOISONEDTLP |
- INFINIPATH_HWE_PCIECPLTIMEOUT |
- INFINIPATH_HWE_PCIEBUSPARITYXTLH |
- INFINIPATH_HWE_PCIEBUSPARITYXADM |
- INFINIPATH_HWE_PCIEBUSPARITYRADM |
- INFINIPATH_HWE_MEMBISTFAILED |
- INFINIPATH_HWE_COREPLL_FBSLIP |
- INFINIPATH_HWE_COREPLL_RFSLIP |
- INFINIPATH_HWE_SERDESPLLFAILED |
- INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
- INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
- dd->ipath_i_bitsextant =
- (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
- (INFINIPATH_I_RCVAVAIL_MASK <<
- INFINIPATH_I_RCVAVAIL_SHIFT) |
- INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
- INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
- dd->ipath_e_bitsextant =
- INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
- INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
- INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
- INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
- INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
- INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
- INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
- INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
- INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
- INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
- INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
- INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
- INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
- INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
- INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
- INFINIPATH_E_HARDWARE;
-
- dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
- dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
- dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
- dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
-
- /*
- * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
- * 2 is Some Misc, 3 is reserved for future.
- */
- dd->ipath_eep_st_masks[0].hwerrs_to_log =
- INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
-
- /* Ignore errors in PIO/PBC on systems with unordered write-combining */
- if (ipath_unordered_wc())
- dd->ipath_eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;
-
- dd->ipath_eep_st_masks[1].hwerrs_to_log =
- INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
-
- dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
- dd->delay_mult = 2; /* SDR, 4X, can't change */
-}
-
-/* setup the MSI stuff again after a reset. I'd like to just call
- * pci_enable_msi() and request_irq() again, but when I do that,
- * the MSI enable bit doesn't get set in the command word, and
- * we switch to a different interrupt vector, which is confusing,
- * so I instead just do it all inline. Perhaps somehow we can tie this
- * into the PCIe hotplug support at some point.
- * Note, because I'm doing it all here, I don't call pci_disable_msi()
- * or free_irq() at the start of ipath_setup_pe_reset().
- */
-static int ipath_reinit_msi(struct ipath_devdata *dd)
-{
- int pos;
- u16 control;
- int ret;
-
- if (!dd->ipath_msi_lo) {
- dev_info(&dd->pcidev->dev, "Can't restore MSI config, "
- "initial setup failed?\n");
- ret = 0;
- goto bail;
- }
-
- if (!(pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
- ipath_dev_err(dd, "Can't find MSI capability, "
- "can't restore MSI settings\n");
- ret = 0;
- goto bail;
- }
- ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
- dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
- dd->ipath_msi_lo);
- ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
- dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
- dd->ipath_msi_hi);
- pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
- if (!(control & PCI_MSI_FLAGS_ENABLE)) {
- ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
- "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
- control, control | PCI_MSI_FLAGS_ENABLE);
- control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
- control);
- }
- /* now rewrite the data (vector) info */
- pci_write_config_word(dd->pcidev, pos +
- ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
- dd->ipath_msi_data);
- /* we restore the cachelinesize also, although it doesn't really
- * matter */
- pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
- dd->ipath_pci_cacheline);
- /* and now set the pci master bit again */
- pci_set_master(dd->pcidev);
- ret = 1;
-
-bail:
- return ret;
-}
-
-/* This routine sleeps, so it can only be called from user context, not
- * from interrupt context. If we need interrupt context, we can split
- * it into two routines.
-*/
-static int ipath_setup_pe_reset(struct ipath_devdata *dd)
-{
- u64 val;
- int i;
- int ret;
- u16 cmdval;
-
- pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval);
-
- /* Use ERROR so it shows up in logs, etc. */
- ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
- /* keep chip from being accessed in a few places */
- dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT);
- val = dd->ipath_control | INFINIPATH_C_RESET;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
- mb();
-
- for (i = 1; i <= 5; i++) {
- int r;
- /* allow MBIST, etc. to complete; longer on each retry.
- * We sometimes get machine checks from bus timeout if no
- * response, so for now, make it *really* long.
- */
- msleep(1000 + (1 + i) * 2000);
- if ((r =
- pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
- dd->ipath_pcibar0)))
- ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n",
- r);
- if ((r =
- pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
- dd->ipath_pcibar1)))
- ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n",
- r);
- /* now re-enable memory access */
- pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval);
- if ((r = pci_enable_device(dd->pcidev)))
- ipath_dev_err(dd, "pci_enable_device failed after "
- "reset: %d\n", r);
- /*
- * whether it fully enabled or not, mark as present,
- * again (but not INITTED)
- */
- dd->ipath_flags |= IPATH_PRESENT;
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
- if (val == dd->ipath_revision) {
- ipath_cdbg(VERBOSE, "Got matching revision "
- "register %llx on try %d\n",
- (unsigned long long) val, i);
- ret = ipath_reinit_msi(dd);
- goto bail;
- }
- /* Probably getting -1 back */
- ipath_dbg("Didn't get expected revision register, "
- "got %llx, try %d\n", (unsigned long long) val,
- i + 1);
- }
- ret = 0; /* failed */
-
-bail:
- if (ret)
- ipath_6120_pcie_params(dd);
- return ret;
-}
-
-/**
- * ipath_pe_put_tid - write a TID in chip
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
- * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for special locking etc.
- * It's used for both the full cleanup on exit, as well as the normal
- * setup and teardown.
- */
-static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
- unsigned long flags = 0; /* keep gcc quiet */
- int tidx;
- spinlock_t *tidlockp;
-
- if (!dd->ipath_kregbase)
- return;
-
- if (pa != dd->ipath_tidinvalid) {
- if (pa & ((1U << 11) - 1)) {
- dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
- "not 2KB aligned!\n", pa);
- return;
- }
- pa >>= 11;
- /* paranoia check */
- if (pa & ~INFINIPATH_RT_ADDR_MASK)
- ipath_dev_err(dd,
- "BUG: Physical page address 0x%lx "
- "has bits set in 31-29\n", pa);
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- pa |= dd->ipath_tidtemplate;
- else /* for now, always full 4KB page */
- pa |= 2 << 29;
- }
-
- /*
- * Workaround chip bug 9437 by writing the scratch register
- * before and after the TID, and with an io write barrier.
- * We use a spinlock around the writes, so they can't intermix
- * with other TID (eager or expected) writes (the chip bug
- * is triggered by back to back TID writes). Unfortunately, this
- * call can be done from interrupt level for the port 0 eager TIDs,
- * so we have to use irqsave locks.
- */
- /*
- * Assumes tidptr always > ipath_egrtidbase
- * if type == RCVHQ_RCV_TYPE_EAGER.
- */
- tidx = tidptr - dd->ipath_egrtidbase;
-
- tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->ipath_rcvegrcnt)
- ? &dd->ipath_kernel_tid_lock : &dd->ipath_user_tid_lock;
- spin_lock_irqsave(tidlockp, flags);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf);
- writel(pa, tidp32);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef);
- mmiowb();
- spin_unlock_irqrestore(tidlockp, flags);
-}
-
-/**
- * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
- * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for selection of the
- * appropriate "flavor". The static calls in cleanup just use the
- * revision-agnostic form, as they are not performance critical.
- */
-static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
- u32 tidx;
-
- if (!dd->ipath_kregbase)
- return;
-
- if (pa != dd->ipath_tidinvalid) {
- if (pa & ((1U << 11) - 1)) {
- dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
- "not 2KB aligned!\n", pa);
- return;
- }
- pa >>= 11;
- /* paranoia check */
- if (pa & ~INFINIPATH_RT_ADDR_MASK)
- ipath_dev_err(dd,
- "BUG: Physical page address 0x%lx "
- "has bits set in 31-29\n", pa);
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- pa |= dd->ipath_tidtemplate;
- else /* for now, always full 4KB page */
- pa |= 2 << 29;
- }
- tidx = tidptr - dd->ipath_egrtidbase;
- writel(pa, tidp32);
- mmiowb();
-}
-
-
-/**
- * ipath_pe_clear_tid - clear all TID entries for a port, expected and eager
- * @dd: the infinipath device
- * @port: the port
- *
- * clear all TID entries for a port, expected and eager.
- * Used from ipath_close(). On this chip, TIDs are only 32 bits,
- * not 64, but they are still on 64 bit boundaries, so tidbase
- * is declared as u64 * for the pointer math, even though we write 32 bits
- */
-static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port)
-{
- u64 __iomem *tidbase;
- unsigned long tidinv;
- int i;
-
- if (!dd->ipath_kregbase)
- return;
-
- ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
-
- tidinv = dd->ipath_tidinvalid;
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->ipath_kregbase) +
- dd->ipath_rcvtidbase +
- port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->ipath_rcvtidcnt; i++)
- dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
- tidinv);
-
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->ipath_kregbase) +
- dd->ipath_rcvegrbase +
- port * dd->ipath_rcvegrcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->ipath_rcvegrcnt; i++)
- dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
- tidinv);
-}
-
-/**
- * ipath_pe_tidtemplate - setup constants for TID updates
- * @dd: the infinipath device
- *
- * We setup stuff that we use a lot, to avoid calculating each time
- */
-static void ipath_pe_tidtemplate(struct ipath_devdata *dd)
-{
- u32 egrsize = dd->ipath_rcvegrbufsize;
-
- /* For now, we always allocate 4KB buffers (at init) so we can
- * receive max size packets. We may want a module parameter to
- * specify 2KB or 4KB and/or make it per port instead of per device
- * for those who want to reduce memory footprint. Note that the
- * ipath_rcvhdrentsize size must be large enough to hold the largest
- * IB header (currently 96 bytes) that we expect to handle (plus of
- * course the 2 dwords of RHF).
- */
- if (egrsize == 2048)
- dd->ipath_tidtemplate = 1U << 29;
- else if (egrsize == 4096)
- dd->ipath_tidtemplate = 2U << 29;
- else {
- egrsize = 4096;
- dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
- "%u, using %u\n", dd->ipath_rcvegrbufsize,
- egrsize);
- dd->ipath_tidtemplate = 2U << 29;
- }
- dd->ipath_tidinvalid = 0;
-}
-
-static int ipath_pe_early_init(struct ipath_devdata *dd)
-{
- dd->ipath_flags |= IPATH_4BYTE_TID;
- if (ipath_unordered_wc())
- dd->ipath_flags |= IPATH_PIO_FLUSH_WC;
-
- /*
- * For openfabrics, we need to be able to handle an IB header of
- * 24 dwords. HT chip has arbitrary sized receive buffers, so we
- * made them the same size as the PIO buffers. This chip does not
- * handle arbitrary size buffers, so we need the header large enough
- * to handle largest IB header, but still have room for a 2KB MTU
- * standard IB packet.
- */
- dd->ipath_rcvhdrentsize = 24;
- dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
- dd->ipath_rhf_offset = 0;
- dd->ipath_egrtidbase = (u64 __iomem *)
- ((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase);
-
- dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048;
- /*
- * the min() check here is currently a nop, but it may not always
- * be, depending on just how we do ipath_rcvegrbufsize
- */
- dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k :
- dd->ipath_piosize2k,
- dd->ipath_rcvegrbufsize +
- (dd->ipath_rcvhdrentsize << 2));
- dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
-
- /*
- * We can request a receive interrupt for 1 or
- * more packets from current offset. For now, we set this
- * up for a single packet.
- */
- dd->ipath_rhdrhead_intr_off = 1ULL<<32;
-
- ipath_get_eeprom_info(dd);
-
- return 0;
-}
-
-int __attribute__((weak)) ipath_unordered_wc(void)
-{
- return 0;
-}
-
-/**
- * ipath_init_pe_get_base_info - set chip-specific flags for user code
- * @pd: the infinipath port
- * @kbase: ipath_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
-{
- struct ipath_base_info *kinfo = kbase;
- struct ipath_devdata *dd;
-
- if (ipath_unordered_wc()) {
- kinfo->spi_runtime_flags |= IPATH_RUNTIME_FORCE_WC_ORDER;
- ipath_cdbg(PROC, "Intel processor, forcing WC order\n");
- }
- else
- ipath_cdbg(PROC, "Not Intel processor, WC ordered\n");
-
- if (pd == NULL)
- goto done;
-
- dd = pd->port_dd;
-
-done:
- kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE |
- IPATH_RUNTIME_FORCE_PIOAVAIL | IPATH_RUNTIME_PIO_REGSWAPPED;
- return 0;
-}
-
-static void ipath_pe_free_irq(struct ipath_devdata *dd)
-{
- free_irq(dd->ipath_irq, dd);
- dd->ipath_irq = 0;
-}
-
-
-static struct ipath_message_header *
-ipath_pe_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
-{
- return (struct ipath_message_header *)
- &rhf_addr[sizeof(u64) / sizeof(u32)];
-}
-
-static void ipath_pe_config_ports(struct ipath_devdata *dd, ushort cfgports)
-{
- dd->ipath_portcnt =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
- dd->ipath_p0_rcvegrcnt =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
-}
-
-static void ipath_pe_read_counters(struct ipath_devdata *dd,
- struct infinipath_counters *cntrs)
-{
- cntrs->LBIntCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt));
- cntrs->LBFlowStallCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt));
- cntrs->TxSDmaDescCnt = 0;
- cntrs->TxUnsupVLErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt));
- cntrs->TxDataPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt));
- cntrs->TxFlowPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt));
- cntrs->TxDwordCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt));
- cntrs->TxLenErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt));
- cntrs->TxMaxMinLenErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt));
- cntrs->TxUnderrunCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt));
- cntrs->TxFlowStallCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt));
- cntrs->TxDroppedPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt));
- cntrs->RxDroppedPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt));
- cntrs->RxDataPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt));
- cntrs->RxFlowPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt));
- cntrs->RxDwordCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt));
- cntrs->RxLenErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt));
- cntrs->RxMaxMinLenErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt));
- cntrs->RxICRCErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt));
- cntrs->RxVCRCErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt));
- cntrs->RxFlowCtrlErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt));
- cntrs->RxBadFormatCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt));
- cntrs->RxLinkProblemCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt));
- cntrs->RxEBPCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt));
- cntrs->RxLPCRCErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt));
- cntrs->RxBufOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt));
- cntrs->RxTIDFullErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt));
- cntrs->RxTIDValidErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt));
- cntrs->RxPKeyMismatchCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt));
- cntrs->RxP0HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt));
- cntrs->RxP1HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt));
- cntrs->RxP2HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt));
- cntrs->RxP3HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt));
- cntrs->RxP4HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt));
- cntrs->RxP5HdrEgrOvflCnt = 0;
- cntrs->RxP6HdrEgrOvflCnt = 0;
- cntrs->RxP7HdrEgrOvflCnt = 0;
- cntrs->RxP8HdrEgrOvflCnt = 0;
- cntrs->RxP9HdrEgrOvflCnt = 0;
- cntrs->RxP10HdrEgrOvflCnt = 0;
- cntrs->RxP11HdrEgrOvflCnt = 0;
- cntrs->RxP12HdrEgrOvflCnt = 0;
- cntrs->RxP13HdrEgrOvflCnt = 0;
- cntrs->RxP14HdrEgrOvflCnt = 0;
- cntrs->RxP15HdrEgrOvflCnt = 0;
- cntrs->RxP16HdrEgrOvflCnt = 0;
- cntrs->IBStatusChangeCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt));
- cntrs->IBLinkErrRecoveryCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt));
- cntrs->IBLinkDownedCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt));
- cntrs->IBSymbolErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt));
- cntrs->RxVL15DroppedPktCnt = 0;
- cntrs->RxOtherLocalPhyErrCnt = 0;
- cntrs->PcieRetryBufDiagQwordCnt = 0;
- cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs;
- cntrs->LocalLinkIntegrityErrCnt = dd->ipath_lli_errs;
- cntrs->RxVlErrCnt = 0;
- cntrs->RxDlidFltrCnt = 0;
-}
-
-
-/* no interrupt fallback for these chips */
-static int ipath_pe_nointr_fallback(struct ipath_devdata *dd)
-{
- return 0;
-}
-
-
-/*
- * reset the XGXS (between serdes and IBC). Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining. To do this right, we reset IBC
- * as well.
- */
-static void ipath_pe_xgxs_reset(struct ipath_devdata *dd)
-{
- u64 val, prev_val;
-
- prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- val = prev_val | INFINIPATH_XGXS_RESET;
- prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control);
-}
-
-
-static int ipath_pe_get_ib_cfg(struct ipath_devdata *dd, int which)
-{
- int ret;
-
- switch (which) {
- case IPATH_IB_CFG_LWID:
- ret = dd->ipath_link_width_active;
- break;
- case IPATH_IB_CFG_SPD:
- ret = dd->ipath_link_speed_active;
- break;
- case IPATH_IB_CFG_LWID_ENB:
- ret = dd->ipath_link_width_enabled;
- break;
- case IPATH_IB_CFG_SPD_ENB:
- ret = dd->ipath_link_speed_enabled;
- break;
- default:
- ret = -ENOTSUPP;
- break;
- }
- return ret;
-}
-
-
-/* we assume range checking is already done, if needed */
-static int ipath_pe_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
-{
- int ret = 0;
-
- if (which == IPATH_IB_CFG_LWID_ENB)
- dd->ipath_link_width_enabled = val;
- else if (which == IPATH_IB_CFG_SPD_ENB)
- dd->ipath_link_speed_enabled = val;
- else
- ret = -ENOTSUPP;
- return ret;
-}
-
-static void ipath_pe_config_jint(struct ipath_devdata *dd, u16 a, u16 b)
-{
-}
-
-
-static int ipath_pe_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
-{
- if (ibup) {
- if (dd->ibdeltainprog) {
- dd->ibdeltainprog = 0;
- dd->ibsymdelta +=
- ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt) -
- dd->ibsymsnap;
- dd->iblnkerrdelta +=
- ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt) -
- dd->iblnkerrsnap;
- }
- } else {
- dd->ipath_lli_counter = 0;
- if (!dd->ibdeltainprog) {
- dd->ibdeltainprog = 1;
- dd->ibsymsnap =
- ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt);
- dd->iblnkerrsnap =
- ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt);
- }
- }
-
- ipath_setup_pe_setextled(dd, ipath_ib_linkstate(dd, ibcs),
- ipath_ib_linktrstate(dd, ibcs));
- return 0;
-}
-
-
-/**
- * ipath_init_iba6120_funcs - set up the chip-specific function pointers
- * @dd: the infinipath device
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
-{
- dd->ipath_f_intrsetup = ipath_pe_intconfig;
- dd->ipath_f_bus = ipath_setup_pe_config;
- dd->ipath_f_reset = ipath_setup_pe_reset;
- dd->ipath_f_get_boardname = ipath_pe_boardname;
- dd->ipath_f_init_hwerrors = ipath_pe_init_hwerrors;
- dd->ipath_f_early_init = ipath_pe_early_init;
- dd->ipath_f_handle_hwerrors = ipath_pe_handle_hwerrors;
- dd->ipath_f_quiet_serdes = ipath_pe_quiet_serdes;
- dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes;
- dd->ipath_f_clear_tids = ipath_pe_clear_tids;
- /*
- * _f_put_tid may get changed after we read the chip revision,
- * but we start with the safe version for all revs
- */
- dd->ipath_f_put_tid = ipath_pe_put_tid;
- dd->ipath_f_cleanup = ipath_setup_pe_cleanup;
- dd->ipath_f_setextled = ipath_setup_pe_setextled;
- dd->ipath_f_get_base_info = ipath_pe_get_base_info;
- dd->ipath_f_free_irq = ipath_pe_free_irq;
- dd->ipath_f_tidtemplate = ipath_pe_tidtemplate;
- dd->ipath_f_intr_fallback = ipath_pe_nointr_fallback;
- dd->ipath_f_xgxs_reset = ipath_pe_xgxs_reset;
- dd->ipath_f_get_msgheader = ipath_pe_get_msgheader;
- dd->ipath_f_config_ports = ipath_pe_config_ports;
- dd->ipath_f_read_counters = ipath_pe_read_counters;
- dd->ipath_f_get_ib_cfg = ipath_pe_get_ib_cfg;
- dd->ipath_f_set_ib_cfg = ipath_pe_set_ib_cfg;
- dd->ipath_f_config_jint = ipath_pe_config_jint;
- dd->ipath_f_ib_updown = ipath_pe_ib_updown;
-
-
- /* initialize chip-specific variables */
- ipath_init_pe_variables(dd);
-}
-
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
deleted file mode 100644
index 34b778ed97fc..000000000000
--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
+++ /dev/null
@@ -1,2631 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the
- * InfiniPath 7220 chip (except that specific to the SerDes)
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-#include "ipath_7220.h"
-
-static void ipath_setup_7220_setextled(struct ipath_devdata *, u64, u64);
-
-static unsigned ipath_compat_ddr_negotiate = 1;
-
-module_param_named(compat_ddr_negotiate, ipath_compat_ddr_negotiate, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(compat_ddr_negotiate,
- "Attempt pre-IBTA 1.2 DDR speed negotiation");
-
-static unsigned ipath_sdma_fetch_arb = 1;
-module_param_named(fetch_arb, ipath_sdma_fetch_arb, uint, S_IRUGO);
-MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
-
-/*
- * This file contains almost all the chip-specific register information and
- * access functions for the QLogic InfiniPath 7220 PCI-Express chip, with the
- * exception of SerDes support, which is in ipath_sd7220.c.
- *
- * This lists the InfiniPath registers, in the actual chip layout.
- * This structure should never be directly accessed.
- */
-struct _infinipath_do_not_use_kernel_regs {
- unsigned long long Revision;
- unsigned long long Control;
- unsigned long long PageAlign;
- unsigned long long PortCnt;
- unsigned long long DebugPortSelect;
- unsigned long long DebugSigsIntSel; /* was Reserved0;*/
- unsigned long long SendRegBase;
- unsigned long long UserRegBase;
- unsigned long long CounterRegBase;
- unsigned long long Scratch;
- unsigned long long EEPROMAddrCmd; /* was Reserved1; */
- unsigned long long EEPROMData; /* was Reserved2; */
- unsigned long long IntBlocked;
- unsigned long long IntMask;
- unsigned long long IntStatus;
- unsigned long long IntClear;
- unsigned long long ErrorMask;
- unsigned long long ErrorStatus;
- unsigned long long ErrorClear;
- unsigned long long HwErrMask;
- unsigned long long HwErrStatus;
- unsigned long long HwErrClear;
- unsigned long long HwDiagCtrl;
- unsigned long long MDIO;
- unsigned long long IBCStatus;
- unsigned long long IBCCtrl;
- unsigned long long ExtStatus;
- unsigned long long ExtCtrl;
- unsigned long long GPIOOut;
- unsigned long long GPIOMask;
- unsigned long long GPIOStatus;
- unsigned long long GPIOClear;
- unsigned long long RcvCtrl;
- unsigned long long RcvBTHQP;
- unsigned long long RcvHdrSize;
- unsigned long long RcvHdrCnt;
- unsigned long long RcvHdrEntSize;
- unsigned long long RcvTIDBase;
- unsigned long long RcvTIDCnt;
- unsigned long long RcvEgrBase;
- unsigned long long RcvEgrCnt;
- unsigned long long RcvBufBase;
- unsigned long long RcvBufSize;
- unsigned long long RxIntMemBase;
- unsigned long long RxIntMemSize;
- unsigned long long RcvPartitionKey;
- unsigned long long RcvQPMulticastPort;
- unsigned long long RcvPktLEDCnt;
- unsigned long long IBCDDRCtrl;
- unsigned long long HRTBT_GUID;
- unsigned long long IB_SDTEST_IF_TX;
- unsigned long long IB_SDTEST_IF_RX;
- unsigned long long IBCDDRCtrl2;
- unsigned long long IBCDDRStatus;
- unsigned long long JIntReload;
- unsigned long long IBNCModeCtrl;
- unsigned long long SendCtrl;
- unsigned long long SendBufBase;
- unsigned long long SendBufSize;
- unsigned long long SendBufCnt;
- unsigned long long SendAvailAddr;
- unsigned long long TxIntMemBase;
- unsigned long long TxIntMemSize;
- unsigned long long SendDmaBase;
- unsigned long long SendDmaLenGen;
- unsigned long long SendDmaTail;
- unsigned long long SendDmaHead;
- unsigned long long SendDmaHeadAddr;
- unsigned long long SendDmaBufMask0;
- unsigned long long SendDmaBufMask1;
- unsigned long long SendDmaBufMask2;
- unsigned long long SendDmaStatus;
- unsigned long long SendBufferError;
- unsigned long long SendBufferErrorCONT1;
- unsigned long long SendBufErr2; /* was Reserved6SBE[0/6] */
- unsigned long long Reserved6L[2];
- unsigned long long AvailUpdCount;
- unsigned long long RcvHdrAddr0;
- unsigned long long RcvHdrAddrs[16]; /* Why enumerate? */
- unsigned long long Reserved7hdtl; /* Align next to 300 */
- unsigned long long RcvHdrTailAddr0; /* 300, like others */
- unsigned long long RcvHdrTailAddrs[16];
- unsigned long long Reserved9SW[7]; /* was [8]; we have 17 ports */
- unsigned long long IbsdEpbAccCtl; /* IB Serdes EPB access control */
- unsigned long long IbsdEpbTransReg; /* IB Serdes EPB Transaction */
- unsigned long long Reserved10sds; /* was SerdesStatus on */
- unsigned long long XGXSConfig;
- unsigned long long IBSerDesCtrl; /* Was IBPLLCfg on Monty */
- unsigned long long EEPCtlStat; /* for "boot" EEPROM/FLASH */
- unsigned long long EEPAddrCmd;
- unsigned long long EEPData;
- unsigned long long PcieEpbAccCtl;
- unsigned long long PcieEpbTransCtl;
- unsigned long long EfuseCtl; /* E-Fuse control */
- unsigned long long EfuseData[4];
- unsigned long long ProcMon;
- /* this chip moves following two from previous 200, 208 */
- unsigned long long PCIeRBufTestReg0;
- unsigned long long PCIeRBufTestReg1;
- /* added for this chip */
- unsigned long long PCIeRBufTestReg2;
- unsigned long long PCIeRBufTestReg3;
- /* added for this chip, debug only */
- unsigned long long SPC_JTAG_ACCESS_REG;
- unsigned long long LAControlReg;
- unsigned long long GPIODebugSelReg;
- unsigned long long DebugPortValueReg;
- /* added for this chip, DMA */
- unsigned long long SendDmaBufUsed[3];
- unsigned long long SendDmaReqTagUsed;
- /*
- * added for this chip, EFUSE: note that these program 64-bit
- * words 2 and 3 */
- unsigned long long efuse_pgm_data[2];
- unsigned long long Reserved11LAalign[10]; /* Skip 4B0..4F8 */
- /* we have 30 regs for DDS and RXEQ in IB SERDES */
- unsigned long long SerDesDDSRXEQ[30];
- unsigned long long Reserved12LAalign[2]; /* Skip 5F0, 5F8 */
- /* added for LA debug support */
- unsigned long long LAMemory[32];
-};
-
-struct _infinipath_do_not_use_counters {
- __u64 LBIntCnt;
- __u64 LBFlowStallCnt;
- __u64 TxSDmaDescCnt; /* was Reserved1 */
- __u64 TxUnsupVLErrCnt;
- __u64 TxDataPktCnt;
- __u64 TxFlowPktCnt;
- __u64 TxDwordCnt;
- __u64 TxLenErrCnt;
- __u64 TxMaxMinLenErrCnt;
- __u64 TxUnderrunCnt;
- __u64 TxFlowStallCnt;
- __u64 TxDroppedPktCnt;
- __u64 RxDroppedPktCnt;
- __u64 RxDataPktCnt;
- __u64 RxFlowPktCnt;
- __u64 RxDwordCnt;
- __u64 RxLenErrCnt;
- __u64 RxMaxMinLenErrCnt;
- __u64 RxICRCErrCnt;
- __u64 RxVCRCErrCnt;
- __u64 RxFlowCtrlErrCnt;
- __u64 RxBadFormatCnt;
- __u64 RxLinkProblemCnt;
- __u64 RxEBPCnt;
- __u64 RxLPCRCErrCnt;
- __u64 RxBufOvflCnt;
- __u64 RxTIDFullErrCnt;
- __u64 RxTIDValidErrCnt;
- __u64 RxPKeyMismatchCnt;
- __u64 RxP0HdrEgrOvflCnt;
- __u64 RxP1HdrEgrOvflCnt;
- __u64 RxP2HdrEgrOvflCnt;
- __u64 RxP3HdrEgrOvflCnt;
- __u64 RxP4HdrEgrOvflCnt;
- __u64 RxP5HdrEgrOvflCnt;
- __u64 RxP6HdrEgrOvflCnt;
- __u64 RxP7HdrEgrOvflCnt;
- __u64 RxP8HdrEgrOvflCnt;
- __u64 RxP9HdrEgrOvflCnt; /* was Reserved6 */
- __u64 RxP10HdrEgrOvflCnt; /* was Reserved7 */
- __u64 RxP11HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP12HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP13HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP14HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP15HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP16HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 IBStatusChangeCnt;
- __u64 IBLinkErrRecoveryCnt;
- __u64 IBLinkDownedCnt;
- __u64 IBSymbolErrCnt;
- /* The following are new for IBA7220 */
- __u64 RxVL15DroppedPktCnt;
- __u64 RxOtherLocalPhyErrCnt;
- __u64 PcieRetryBufDiagQwordCnt;
- __u64 ExcessBufferOvflCnt;
- __u64 LocalLinkIntegrityErrCnt;
- __u64 RxVlErrCnt;
- __u64 RxDlidFltrCnt;
- __u64 Reserved8[7];
- __u64 PSStat;
- __u64 PSStart;
- __u64 PSInterval;
- __u64 PSRcvDataCount;
- __u64 PSRcvPktsCount;
- __u64 PSXmitDataCount;
- __u64 PSXmitPktsCount;
- __u64 PSXmitWaitCount;
-};
-
-#define IPATH_KREG_OFFSET(field) (offsetof( \
- struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
-#define IPATH_CREG_OFFSET(field) (offsetof( \
- struct _infinipath_do_not_use_counters, field) / sizeof(u64))
-
-static const struct ipath_kregs ipath_7220_kregs = {
- .kr_control = IPATH_KREG_OFFSET(Control),
- .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
- .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
- .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
- .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
- .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
- .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
- .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
- .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
- .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
- .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
- .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
- .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
- .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
- .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
- .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
- .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
- .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
- .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
- .kr_intclear = IPATH_KREG_OFFSET(IntClear),
- .kr_intmask = IPATH_KREG_OFFSET(IntMask),
- .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
- .kr_mdio = IPATH_KREG_OFFSET(MDIO),
- .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
- .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
- .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
- .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
- .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
- .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
- .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
- .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
- .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
- .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
- .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
- .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
- .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
- .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
- .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
- .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
- .kr_revision = IPATH_KREG_OFFSET(Revision),
- .kr_scratch = IPATH_KREG_OFFSET(Scratch),
- .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
- .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
- .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendAvailAddr),
- .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendBufBase),
- .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendBufCnt),
- .kr_sendpiosize = IPATH_KREG_OFFSET(SendBufSize),
- .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
- .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
- .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
- .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
-
- .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
-
- /* send dma related regs */
- .kr_senddmabase = IPATH_KREG_OFFSET(SendDmaBase),
- .kr_senddmalengen = IPATH_KREG_OFFSET(SendDmaLenGen),
- .kr_senddmatail = IPATH_KREG_OFFSET(SendDmaTail),
- .kr_senddmahead = IPATH_KREG_OFFSET(SendDmaHead),
- .kr_senddmaheadaddr = IPATH_KREG_OFFSET(SendDmaHeadAddr),
- .kr_senddmabufmask0 = IPATH_KREG_OFFSET(SendDmaBufMask0),
- .kr_senddmabufmask1 = IPATH_KREG_OFFSET(SendDmaBufMask1),
- .kr_senddmabufmask2 = IPATH_KREG_OFFSET(SendDmaBufMask2),
- .kr_senddmastatus = IPATH_KREG_OFFSET(SendDmaStatus),
-
- /* SerDes related regs */
- .kr_ibserdesctrl = IPATH_KREG_OFFSET(IBSerDesCtrl),
- .kr_ib_epbacc = IPATH_KREG_OFFSET(IbsdEpbAccCtl),
- .kr_ib_epbtrans = IPATH_KREG_OFFSET(IbsdEpbTransReg),
- .kr_pcie_epbacc = IPATH_KREG_OFFSET(PcieEpbAccCtl),
- .kr_pcie_epbtrans = IPATH_KREG_OFFSET(PcieEpbTransCtl),
- .kr_ib_ddsrxeq = IPATH_KREG_OFFSET(SerDesDDSRXEQ),
-
- /*
- * These should not be used directly via ipath_read_kreg64(),
- * use them with ipath_read_kreg64_port()
- */
- .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
- .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
-
- /*
- * The rcvpktled register controls one of the debug port signals, so
- * a packet activity LED can be connected to it.
- */
- .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
- .kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
- .kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
-
- .kr_hrtbt_guid = IPATH_KREG_OFFSET(HRTBT_GUID),
- .kr_ibcddrctrl = IPATH_KREG_OFFSET(IBCDDRCtrl),
- .kr_ibcddrstatus = IPATH_KREG_OFFSET(IBCDDRStatus),
- .kr_jintreload = IPATH_KREG_OFFSET(JIntReload)
-};
-
-static const struct ipath_cregs ipath_7220_cregs = {
- .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
- .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
- .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
- .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
- .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
- .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
- .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
- .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
- .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
- .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
- .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
- .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
- .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
- .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
- .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
- .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
- .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
- .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
- .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
- .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
- .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
- .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
- .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
- .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
- .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
- .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
- .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
- .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
- .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
- .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
- .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
- .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
- .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt),
- .cr_vl15droppedpktcnt = IPATH_CREG_OFFSET(RxVL15DroppedPktCnt),
- .cr_rxotherlocalphyerrcnt =
- IPATH_CREG_OFFSET(RxOtherLocalPhyErrCnt),
- .cr_excessbufferovflcnt = IPATH_CREG_OFFSET(ExcessBufferOvflCnt),
- .cr_locallinkintegrityerrcnt =
- IPATH_CREG_OFFSET(LocalLinkIntegrityErrCnt),
- .cr_rxvlerrcnt = IPATH_CREG_OFFSET(RxVlErrCnt),
- .cr_rxdlidfltrcnt = IPATH_CREG_OFFSET(RxDlidFltrCnt),
- .cr_psstat = IPATH_CREG_OFFSET(PSStat),
- .cr_psstart = IPATH_CREG_OFFSET(PSStart),
- .cr_psinterval = IPATH_CREG_OFFSET(PSInterval),
- .cr_psrcvdatacount = IPATH_CREG_OFFSET(PSRcvDataCount),
- .cr_psrcvpktscount = IPATH_CREG_OFFSET(PSRcvPktsCount),
- .cr_psxmitdatacount = IPATH_CREG_OFFSET(PSXmitDataCount),
- .cr_psxmitpktscount = IPATH_CREG_OFFSET(PSXmitPktsCount),
- .cr_psxmitwaitcount = IPATH_CREG_OFFSET(PSXmitWaitCount),
-};
-
-/* kr_control bits */
-#define INFINIPATH_C_RESET (1U<<7)
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_MASK ((1ULL<<17)-1)
-#define INFINIPATH_I_RCVURG_SHIFT 32
-#define INFINIPATH_I_RCVAVAIL_MASK ((1ULL<<17)-1)
-#define INFINIPATH_I_RCVAVAIL_SHIFT 0
-#define INFINIPATH_I_SERDESTRIMDONE (1ULL<<27)
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus bits */
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
-#define INFINIPATH_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
-#define INFINIPATH_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
-#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
-#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
-#define INFINIPATH_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
-#define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
-#define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL
-/* specific to this chip */
-#define INFINIPATH_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL
-#define INFINIPATH_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL
-#define INFINIPATH_HWE_SDMAMEMREADERR 0x0000000010000000ULL
-#define INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL
-#define INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL
-#define INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL
-#define INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL
-#define INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL
-#define INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL
-#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
-#define INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
-#define INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
-
-#define IBA7220_IBCS_LINKTRAININGSTATE_MASK 0x1F
-#define IBA7220_IBCS_LINKSTATE_SHIFT 5
-#define IBA7220_IBCS_LINKSPEED_SHIFT 8
-#define IBA7220_IBCS_LINKWIDTH_SHIFT 9
-
-#define IBA7220_IBCC_LINKINITCMD_MASK 0x7ULL
-#define IBA7220_IBCC_LINKCMD_SHIFT 19
-#define IBA7220_IBCC_MAXPKTLEN_SHIFT 21
-
-/* kr_ibcddrctrl bits */
-#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL
-#define IBA7220_IBC_DLIDLMC_SHIFT 32
-#define IBA7220_IBC_HRTBT_MASK 3
-#define IBA7220_IBC_HRTBT_SHIFT 16
-#define IBA7220_IBC_HRTBT_ENB 0x10000UL
-#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
-#define IBA7220_IBC_LREV_MASK 1
-#define IBA7220_IBC_LREV_SHIFT 8
-#define IBA7220_IBC_RXPOL_MASK 1
-#define IBA7220_IBC_RXPOL_SHIFT 7
-#define IBA7220_IBC_WIDTH_SHIFT 5
-#define IBA7220_IBC_WIDTH_MASK 0x3
-#define IBA7220_IBC_WIDTH_1X_ONLY (0<<IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_WIDTH_4X_ONLY (1<<IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_WIDTH_AUTONEG (2<<IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_SPEED_AUTONEG (1<<1)
-#define IBA7220_IBC_SPEED_SDR (1<<2)
-#define IBA7220_IBC_SPEED_DDR (1<<3)
-#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7<<1)
-#define IBA7220_IBC_IBTA_1_2_MASK (1)
-
-/* kr_ibcddrstatus */
-/* link latency shift is 0, don't bother defining */
-#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_FREQSEL 0x2
-#define INFINIPATH_EXTS_SERDESSEL 0x4
-#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
-#define INFINIPATH_EXTS_MEMBIST_DISABLED 0x0000000000008000
-
-/* kr_xgxsconfig bits */
-#define INFINIPATH_XGXS_RESET 0x5ULL
-#define INFINIPATH_XGXS_FC_SAFE (1ULL<<63)
-
-/* kr_rcvpktledcnt */
-#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
-#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */
-
-#define _IPATH_GPIO_SDA_NUM 1
-#define _IPATH_GPIO_SCL_NUM 0
-
-#define IPATH_GPIO_SDA (1ULL << \
- (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-#define IPATH_GPIO_SCL (1ULL << \
- (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-
-#define IBA7220_R_INTRAVAIL_SHIFT 17
-#define IBA7220_R_TAILUPD_SHIFT 35
-#define IBA7220_R_PORTCFG_SHIFT 36
-
-#define INFINIPATH_JINT_PACKETSHIFT 16
-#define INFINIPATH_JINT_DEFAULT_IDLE_TICKS 0
-#define INFINIPATH_JINT_DEFAULT_MAX_PACKETS 0
-
-#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
-
-/*
- * the size bits give us 2^N, in KB units. 0 marks as invalid,
- * and 7 is reserved. We currently use only 2KB and 4KB
- */
-#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
-#define IBA7220_TID_SZ_2K (1UL<<IBA7220_TID_SZ_SHIFT) /* 2KB */
-#define IBA7220_TID_SZ_4K (2UL<<IBA7220_TID_SZ_SHIFT) /* 4KB */
-#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
-
-#define IPATH_AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
-
-static char int_type[16] = "auto";
-module_param_string(interrupt_type, int_type, sizeof(int_type), 0444);
-MODULE_PARM_DESC(int_type, " interrupt_type=auto|force_msi|force_intx");
-
-/* packet rate matching delay; chip has support */
-static u8 rate_to_delay[2][2] = {
- /* 1x, 4x */
- { 8, 2 }, /* SDR */
- { 4, 1 } /* DDR */
-};
-
-/* 7220 specific hardware errors... */
-static const struct ipath_hwerror_msgs ipath_7220_hwerror_msgs[] = {
- INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
- INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"),
- /*
- * In practice, it's unlikely that we'll see PCIe PLL, or bus
- * parity or memory parity error failures, because most likely we
- * won't be able to talk to the core of the chip. Nonetheless, we
- * might see them, if they are in parts of the PCIe core that aren't
- * essential.
- */
- INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"),
- INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"),
- INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
- INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
- INFINIPATH_HWE_MSG(PCIECPLDATAQUEUEERR, "PCIe cpl header queue"),
- INFINIPATH_HWE_MSG(PCIECPLHDRQUEUEERR, "PCIe cpl data queue"),
- INFINIPATH_HWE_MSG(SDMAMEMREADERR, "Send DMA memory read"),
- INFINIPATH_HWE_MSG(CLK_UC_PLLNOTLOCKED, "uC PLL clock not locked"),
- INFINIPATH_HWE_MSG(PCIESERDESQ0PCLKNOTDETECT,
- "PCIe serdes Q0 no clock"),
- INFINIPATH_HWE_MSG(PCIESERDESQ1PCLKNOTDETECT,
- "PCIe serdes Q1 no clock"),
- INFINIPATH_HWE_MSG(PCIESERDESQ2PCLKNOTDETECT,
- "PCIe serdes Q2 no clock"),
- INFINIPATH_HWE_MSG(PCIESERDESQ3PCLKNOTDETECT,
- "PCIe serdes Q3 no clock"),
- INFINIPATH_HWE_MSG(DDSRXEQMEMORYPARITYERR,
- "DDS RXEQ memory parity"),
- INFINIPATH_HWE_MSG(IB_UC_MEMORYPARITYERR, "IB uC memory parity"),
- INFINIPATH_HWE_MSG(PCIE_UC_OCT0MEMORYPARITYERR,
- "PCIe uC oct0 memory parity"),
- INFINIPATH_HWE_MSG(PCIE_UC_OCT1MEMORYPARITYERR,
- "PCIe uC oct1 memory parity"),
-};
-
-static void autoneg_work(struct work_struct *);
-
-/*
- * the offset is different for different configured port numbers, since
- * port0 is fixed in size, but others can vary. Make it a function to
- * make the issue more obvious.
- */
-static inline u32 port_egrtid_idx(struct ipath_devdata *dd, unsigned port)
-{
- return port ? dd->ipath_p0_rcvegrcnt +
- (port-1) * dd->ipath_rcvegrcnt : 0;
-}
-
-static void ipath_7220_txe_recover(struct ipath_devdata *dd)
-{
- ++ipath_stats.sps_txeparity;
-
- dev_info(&dd->pcidev->dev,
- "Recovering from TXE PIO parity error\n");
- ipath_disarm_senderrbufs(dd);
-}
-
-
-/**
- * ipath_7220_handle_hwerrors - display hardware errors.
- * @dd: the infinipath device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now,
- * we'll print them and continue. We reuse the same message buffer as
- * ipath_handle_errors() to avoid excessive stack usage.
- */
-static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg,
- size_t msgl)
-{
- ipath_err_t hwerrs;
- u32 bits, ctrl;
- int isfatal = 0;
- char bitsmsg[64];
- int log_idx;
-
- hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
- if (!hwerrs) {
- /*
- * better than printing confusing messages.
- * This seems to be related to clearing the crc error, or
- * the pll error during init.
- */
- ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
- goto bail;
- } else if (hwerrs == ~0ULL) {
- ipath_dev_err(dd, "Read of hardware error status failed "
- "(all bits set); ignoring\n");
- goto bail;
- }
- ipath_stats.sps_hwerrs++;
-
- /*
- * Always clear the error status register, except MEMBISTFAIL,
- * regardless of whether we continue or stop using the chip.
- * We want that set so we know it failed, even across driver reload.
- * We'll still ignore it in the hwerrmask. We do this partly for
- * diagnostics, but also for support.
- */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
-
- hwerrs &= dd->ipath_hwerrmask;
-
- /* We log some errors to EEPROM, check if we have any of those. */
- for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
- if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
- ipath_inc_eeprom_err(dd, log_idx, 1);
- /*
- * Make sure we get this much out, unless told to be quiet,
- * or it's occurred within the last 5 seconds.
- */
- if ((hwerrs & ~(dd->ipath_lasthwerror |
- ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
- INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
- << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) ||
- (ipath_debug & __IPATH_VERBDBG))
- dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
- "(cleared)\n", (unsigned long long) hwerrs);
- dd->ipath_lasthwerror |= hwerrs;
-
- if (hwerrs & ~dd->ipath_hwe_bitsextant)
- ipath_dev_err(dd, "hwerror interrupt with unknown errors "
- "%llx set\n", (unsigned long long)
- (hwerrs & ~dd->ipath_hwe_bitsextant));
-
- if (hwerrs & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR)
- ipath_sd7220_clr_ibpar(dd);
-
- ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
- if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
- /*
- * Parity errors in send memory are recoverable by h/w;
- * just do housekeeping, exit freeze mode and continue.
- */
- if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
- INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
- << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
- ipath_7220_txe_recover(dd);
- hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
- INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
- << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT);
- }
- if (hwerrs) {
- /*
- * If any bits are set that we aren't ignoring, only make the
- * complaint once, in case it's stuck or recurring and we get
- * here multiple times.
- * Force the link down so the switch knows, and the
- * LEDs are turned off.
- */
- if (dd->ipath_flags & IPATH_INITTED) {
- ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
- ipath_setup_7220_setextled(dd,
- INFINIPATH_IBCS_L_STATE_DOWN,
- INFINIPATH_IBCS_LT_STATE_DISABLED);
- ipath_dev_err(dd, "Fatal Hardware Error "
- "(freeze mode), no longer"
- " usable, SN %.16s\n",
- dd->ipath_serial);
- isfatal = 1;
- }
- /*
- * Mark as having had an error for driver, and also
- * for /sys and status word mapped to user programs.
- * This marks unit as not usable, until reset.
- */
- *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
- *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
- dd->ipath_flags &= ~IPATH_INITTED;
- } else {
- ipath_dbg("Clearing freezemode on ignored or "
- "recovered hardware error\n");
- ipath_clear_freeze(dd);
- }
- }
-
- *msg = '\0';
-
- if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
- strlcat(msg, "[Memory BIST test failed, "
- "InfiniPath hardware unusable]", msgl);
- /* ignore from now on, so disable until driver reloaded */
- *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
- dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- ipath_format_hwerrors(hwerrs,
- ipath_7220_hwerror_msgs,
- ARRAY_SIZE(ipath_7220_hwerror_msgs),
- msg, msgl);
-
- if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
- << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
- bits = (u32) ((hwerrs >>
- INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
- INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
- snprintf(bitsmsg, sizeof bitsmsg,
- "[PCIe Mem Parity Errs %x] ", bits);
- strlcat(msg, bitsmsg, msgl);
- }
-
-#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
- INFINIPATH_HWE_COREPLL_RFSLIP)
-
- if (hwerrs & _IPATH_PLL_FAIL) {
- snprintf(bitsmsg, sizeof bitsmsg,
- "[PLL failed (%llx), InfiniPath hardware unusable]",
- (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
- strlcat(msg, bitsmsg, msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
- /*
- * If it occurs, it is left masked since the external
- * interface is unused.
- */
- dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- ipath_dev_err(dd, "%s hardware error\n", msg);
- /*
- * For the /sys status file: if no trailing } is copied, we'll
- * know it was truncated.
- */
- if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
- snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
- "{%s}", msg);
-bail:;
-}
-
-/**
- * ipath_7220_boardname - fill in the board name
- * @dd: the infinipath device
- * @name: the output buffer
- * @namelen: the size of the output buffer
- *
- * info is based on the board revision register
- */
-static int ipath_7220_boardname(struct ipath_devdata *dd, char *name,
- size_t namelen)
-{
- char *n = NULL;
- u8 boardrev = dd->ipath_boardrev;
- int ret;
-
- if (boardrev == 15) {
- /*
- * Emulator sometimes comes up all-ones, rather than zero.
- */
- boardrev = 0;
- dd->ipath_boardrev = boardrev;
- }
- switch (boardrev) {
- case 0:
- n = "InfiniPath_7220_Emulation";
- break;
- case 1:
- n = "InfiniPath_QLE7240";
- break;
- case 2:
- n = "InfiniPath_QLE7280";
- break;
- case 3:
- n = "InfiniPath_QLE7242";
- break;
- case 4:
- n = "InfiniPath_QEM7240";
- break;
- case 5:
- n = "InfiniPath_QMI7240";
- break;
- case 6:
- n = "InfiniPath_QMI7264";
- break;
- case 7:
- n = "InfiniPath_QMH7240";
- break;
- case 8:
- n = "InfiniPath_QME7240";
- break;
- case 9:
- n = "InfiniPath_QLE7250";
- break;
- case 10:
- n = "InfiniPath_QLE7290";
- break;
- case 11:
- n = "InfiniPath_QEM7250";
- break;
- case 12:
- n = "InfiniPath_QLE-Bringup";
- break;
- default:
- ipath_dev_err(dd,
- "Don't yet know about board with ID %u\n",
- boardrev);
- snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u",
- boardrev);
- break;
- }
- if (n)
- snprintf(name, namelen, "%s", n);
-
- if (dd->ipath_majrev != 5 || !dd->ipath_minrev ||
- dd->ipath_minrev > 2) {
- ipath_dev_err(dd, "Unsupported InfiniPath hardware "
- "revision %u.%u!\n",
- dd->ipath_majrev, dd->ipath_minrev);
- ret = 1;
- } else if (dd->ipath_minrev == 1 &&
- !(dd->ipath_flags & IPATH_INITTED)) {
- /* Rev1 chips are prototype. Complain at init, but allow use */
- ipath_dev_err(dd, "Unsupported hardware "
- "revision %u.%u, Contact support@qlogic.com\n",
- dd->ipath_majrev, dd->ipath_minrev);
- ret = 0;
- } else
- ret = 0;
-
- /*
- * Set here not in ipath_init_*_funcs because we have to do
- * it after we can read chip registers.
- */
- dd->ipath_ureg_align = 0x10000; /* 64KB alignment */
-
- return ret;
-}
-
-/**
- * ipath_7220_init_hwerrors - enable hardware errors
- * @dd: the infinipath device
- *
- * now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void ipath_7220_init_hwerrors(struct ipath_devdata *dd)
-{
- ipath_err_t val;
- u64 extsval;
-
- extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-
- if (!(extsval & (INFINIPATH_EXTS_MEMBIST_ENDTEST |
- INFINIPATH_EXTS_MEMBIST_DISABLED)))
- ipath_dev_err(dd, "MemBIST did not complete!\n");
- if (extsval & INFINIPATH_EXTS_MEMBIST_DISABLED)
- dev_info(&dd->pcidev->dev, "MemBIST is disabled.\n");
-
- val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */
-
- if (!dd->ipath_boardrev) /* no PLL for Emulator */
- val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-
- if (dd->ipath_minrev == 1)
- val &= ~(1ULL << 42); /* TXE LaunchFIFO Parity rev1 issue */
-
- val &= ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR;
- dd->ipath_hwerrmask = val;
-
- /*
- * special trigger "error" is for debugging purposes. It
- * works around a processor/chipset problem. The error
- * interrupt allows us to count occurrences, but we don't
- * want to pay the overhead for normal use. Emulation only
- */
- if (!dd->ipath_boardrev)
- dd->ipath_maskederrs = INFINIPATH_E_SENDSPECIALTRIGGER;
-}
-
-/*
- * All detailed interaction with the SerDes has been moved to ipath_sd7220.c
- *
- * The portion of IBA7220-specific bringup_serdes() that actually deals with
- * registers and memory within the SerDes itself is ipath_sd7220_init().
- */
-
-/**
- * ipath_7220_bringup_serdes - bring up the serdes
- * @dd: the infinipath device
- */
-static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
-{
- int ret = 0;
- u64 val, prev_val, guid;
- int was_reset; /* Note whether uC was reset */
-
- ipath_dbg("Trying to bringup serdes\n");
-
- if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
- INFINIPATH_HWE_SERDESPLLFAILED) {
- ipath_dbg("At start, serdes PLL failed bit set "
- "in hwerrstatus, clearing and continuing\n");
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- INFINIPATH_HWE_SERDESPLLFAILED);
- }
-
- dd->ibdeltainprog = 1;
- dd->ibsymsnap =
- ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
- dd->iblnkerrsnap =
- ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
-
- if (!dd->ipath_ibcddrctrl) {
- /* not on re-init after reset */
- dd->ipath_ibcddrctrl =
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrctrl);
-
- if (dd->ipath_link_speed_enabled ==
- (IPATH_IB_SDR | IPATH_IB_DDR))
- dd->ipath_ibcddrctrl |=
- IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- else
- dd->ipath_ibcddrctrl |=
- dd->ipath_link_speed_enabled == IPATH_IB_DDR
- ? IBA7220_IBC_SPEED_DDR :
- IBA7220_IBC_SPEED_SDR;
- if ((dd->ipath_link_width_enabled & (IB_WIDTH_1X |
- IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X))
- dd->ipath_ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
- else
- dd->ipath_ibcddrctrl |=
- dd->ipath_link_width_enabled == IB_WIDTH_4X
- ? IBA7220_IBC_WIDTH_4X_ONLY :
- IBA7220_IBC_WIDTH_1X_ONLY;
-
- /* always enable these on driver reload, not sticky */
- dd->ipath_ibcddrctrl |=
- IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
- dd->ipath_ibcddrctrl |=
- IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
- /*
- * automatic lane reversal detection for receive
- * doesn't work correctly in rev 1, so disable it
- * on that rev, otherwise enable (disabling not
- * sticky across reload for >rev1)
- */
- if (dd->ipath_minrev == 1)
- dd->ipath_ibcddrctrl &=
- ~IBA7220_IBC_LANE_REV_SUPPORTED;
- else
- dd->ipath_ibcddrctrl |=
- IBA7220_IBC_LANE_REV_SUPPORTED;
- }
-
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
- dd->ipath_ibcddrctrl);
-
- ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl), 0ULL);
-
- /* IBA7220 has SERDES MPU reset in D0 of what _was_ IBPLLCfg */
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
- /* remember if uC was in Reset or not, for dactrim */
- was_reset = (val & 1);
- ipath_cdbg(VERBOSE, "IBReset %s xgxsconfig %llx\n",
- was_reset ? "Asserted" : "Negated", (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
- if (dd->ipath_boardrev) {
- /*
- * Hardware is not emulator, and may have been reset. Init it.
- * Below will release reset, but needs to know if chip was
- * originally in reset, to only trim DACs on first time
- * after chip reset or powercycle (not driver reload)
- */
- ret = ipath_sd7220_init(dd, was_reset);
- }
-
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- prev_val = val;
- val |= INFINIPATH_XGXS_FC_SAFE;
- if (val != prev_val) {
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- }
- if (val & INFINIPATH_XGXS_RESET)
- val &= ~INFINIPATH_XGXS_RESET;
- if (val != prev_val)
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-
- ipath_cdbg(VERBOSE, "done: xgxs=%llx from %llx\n",
- (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig),
- (unsigned long long) prev_val);
-
- guid = be64_to_cpu(dd->ipath_guid);
-
- if (!guid) {
- /* have to have something, so use likely unique tsc */
- guid = get_cycles();
- ipath_dbg("No GUID for heartbeat, faking %llx\n",
- (unsigned long long)guid);
- } else
- ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n",
- (unsigned long long) guid);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hrtbt_guid, guid);
- return ret;
-}
-
-static void ipath_7220_config_jint(struct ipath_devdata *dd,
- u16 idle_ticks, u16 max_packets)
-{
-
- /*
- * We can request a receive interrupt for 1 or more packets
- * from current offset.
- */
- if (idle_ticks == 0 || max_packets == 0)
- /* interrupt after one packet if no mitigation */
- dd->ipath_rhdrhead_intr_off =
- 1ULL << IBA7220_HDRHEAD_PKTINT_SHIFT;
- else
- /* Turn off RcvHdrHead interrupts if using mitigation */
- dd->ipath_rhdrhead_intr_off = 0ULL;
-
- /* refresh kernel RcvHdrHead registers... */
- ipath_write_ureg(dd, ur_rcvhdrhead,
- dd->ipath_rhdrhead_intr_off |
- dd->ipath_pd[0]->port_head, 0);
-
- dd->ipath_jint_max_packets = max_packets;
- dd->ipath_jint_idle_ticks = idle_ticks;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_jintreload,
- ((u64) max_packets << INFINIPATH_JINT_PACKETSHIFT) |
- idle_ticks);
-}
-
-/**
- * ipath_7220_quiet_serdes - set serdes to txidle
- * @dd: the infinipath device
- * Called when driver is being unloaded
- */
-static void ipath_7220_quiet_serdes(struct ipath_devdata *dd)
-{
- u64 val;
- if (dd->ibsymdelta || dd->iblnkerrdelta ||
- dd->ibdeltainprog) {
- u64 diagc;
- /* enable counter writes */
- diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl,
- diagc | INFINIPATH_DC_COUNTERWREN);
-
- if (dd->ibsymdelta || dd->ibdeltainprog) {
- val = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt);
- if (dd->ibdeltainprog)
- val -= val - dd->ibsymsnap;
- val -= dd->ibsymdelta;
- ipath_write_creg(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt, val);
- }
- if (dd->iblnkerrdelta || dd->ibdeltainprog) {
- val = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt);
- if (dd->ibdeltainprog)
- val -= val - dd->iblnkerrsnap;
- val -= dd->iblnkerrdelta;
- ipath_write_creg(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt, val);
- }
-
- /* and disable counter writes */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc);
- }
-
- dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
- wake_up(&dd->ipath_autoneg_wait);
- cancel_delayed_work(&dd->ipath_autoneg_work);
- flush_scheduled_work();
- ipath_shutdown_relock_poll(dd);
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- val |= INFINIPATH_XGXS_RESET;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-}
-
-static int ipath_7220_intconfig(struct ipath_devdata *dd)
-{
- ipath_7220_config_jint(dd, dd->ipath_jint_idle_ticks,
- dd->ipath_jint_max_packets);
- return 0;
-}
-
-/**
- * ipath_setup_7220_setextled - set the state of the two external LEDs
- * @dd: the infinipath device
- * @lst: the L state
- * @ltst: the LT state
- *
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state).
- *
- * Note: We try to match the Mellanox HCA LED behavior as best
- * we can. Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate. That's
- * visible overhead, so not something we will do.
- *
- */
-static void ipath_setup_7220_setextled(struct ipath_devdata *dd, u64 lst,
- u64 ltst)
-{
- u64 extctl, ledblink = 0;
- unsigned long flags = 0;
-
- /* the diags use the LED to indicate diag info, so we leave
- * the external LED alone when the diags are running */
- if (ipath_diag_inuse)
- return;
-
- /* Allow override of LED display for, e.g. Locating system in rack */
- if (dd->ipath_led_override) {
- ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
- ? INFINIPATH_IBCS_LT_STATE_LINKUP
- : INFINIPATH_IBCS_LT_STATE_DISABLED;
- lst = (dd->ipath_led_override & IPATH_LED_LOG)
- ? INFINIPATH_IBCS_L_STATE_ACTIVE
- : INFINIPATH_IBCS_L_STATE_DOWN;
- }
-
- spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
- extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
- INFINIPATH_EXTC_LED2PRIPORT_ON);
- if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP) {
- extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
- /*
- * counts are in chip clock (4ns) periods.
- * This is 1/16 sec (66.6ms) on,
- * 3/16 sec (187.5 ms) off, with packets rcvd
- */
- ledblink = ((66600*1000UL/4) << IBA7220_LEDBLINK_ON_SHIFT)
- | ((187500*1000UL/4) << IBA7220_LEDBLINK_OFF_SHIFT);
- }
- if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
- extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
- dd->ipath_extctrl = extctl;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
- spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-
- if (ledblink) /* blink the LED on packet receive */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvpktledcnt,
- ledblink);
-}
-
-/*
- * Similar to pci_intx(pdev, 1), except that we make sure
- * msi is off...
- */
-static void ipath_enable_intx(struct pci_dev *pdev)
-{
- u16 cw, new;
- int pos;
-
- /* first, turn on INTx */
- pci_read_config_word(pdev, PCI_COMMAND, &cw);
- new = cw & ~PCI_COMMAND_INTX_DISABLE;
- if (new != cw)
- pci_write_config_word(pdev, PCI_COMMAND, new);
-
- /* then turn off MSI */
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
- if (pos) {
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
- new = cw & ~PCI_MSI_FLAGS_ENABLE;
- if (new != cw)
- pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
- }
-}
-
-static int ipath_msi_enabled(struct pci_dev *pdev)
-{
- int pos, ret = 0;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
- if (pos) {
- u16 cw;
-
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
- ret = !!(cw & PCI_MSI_FLAGS_ENABLE);
- }
- return ret;
-}
-
-/*
- * disable msi interrupt if enabled, and clear the flag.
- * flag is used primarily for the fallback to INTx, but
- * is also used in reinit after reset as a flag.
- */
-static void ipath_7220_nomsi(struct ipath_devdata *dd)
-{
- dd->ipath_msi_lo = 0;
-
- if (ipath_msi_enabled(dd->pcidev)) {
- /*
- * free, but don't zero; later kernels require
- * it be freed before disable_msi, so the intx
- * setup has to request it again.
- */
- if (dd->ipath_irq)
- free_irq(dd->ipath_irq, dd);
- pci_disable_msi(dd->pcidev);
- }
-}
-
-/*
- * ipath_setup_7220_cleanup - clean up any per-chip chip-specific stuff
- * @dd: the infinipath device
- *
- * Nothing but msi interrupt cleanup for now.
- *
- * This is called during driver unload.
- */
-static void ipath_setup_7220_cleanup(struct ipath_devdata *dd)
-{
- ipath_7220_nomsi(dd);
-}
-
-
-static void ipath_7220_pcie_params(struct ipath_devdata *dd, u32 boardrev)
-{
- u16 linkstat, minwidth, speed;
- int pos;
-
- pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
- if (!pos) {
- ipath_dev_err(dd, "Can't find PCI Express capability!\n");
- goto bail;
- }
-
- pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
- &linkstat);
- /*
- * speed is bits 0-3, link width is bits 4-8;
- * no defines for them in headers
- */
- speed = linkstat & 0xf;
- linkstat >>= 4;
- linkstat &= 0x1f;
- dd->ipath_lbus_width = linkstat;
- switch (boardrev) {
- case 0:
- case 2:
- case 10:
- case 12:
- minwidth = 16; /* x16 capable boards */
- break;
- default:
- minwidth = 8; /* x8 capable boards */
- break;
- }
-
- switch (speed) {
- case 1:
- dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */
- break;
- case 2:
- dd->ipath_lbus_speed = 5000; /* Gen2, 5GHz */
- break;
- default: /* not defined, assume gen1 */
- dd->ipath_lbus_speed = 2500;
- break;
- }
-
- if (linkstat < minwidth)
- ipath_dev_err(dd,
- "PCIe width %u (x%u HCA), performance "
- "reduced\n", linkstat, minwidth);
- else
- ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x%u HCA)\n",
- dd->ipath_lbus_speed, linkstat, minwidth);
-
- if (speed != 1)
- ipath_dev_err(dd,
- "PCIe linkspeed %u is incorrect; "
- "should be 1 (2500)!\n", speed);
-
-bail:
- /* fill in string, even on errors */
- snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
- "PCIe,%uMHz,x%u\n",
- dd->ipath_lbus_speed,
- dd->ipath_lbus_width);
- return;
-}
-
-
-/**
- * ipath_setup_7220_config - setup PCIe config related stuff
- * @dd: the infinipath device
- * @pdev: the PCI device
- *
- * The pci_enable_msi() call will fail on systems with MSI quirks
- * such as those with AMD8131, even if the device of interest is not
- * attached to that device (in the 2.6.13 - 2.6.15 kernels, at least; fixed
- * late in 2.6.16).
- * All that can be done is to edit the kernel source to remove the quirk
- * check until that is fixed.
- * We do not need to call enable_msi() for our HyperTransport chip,
- * even though it uses MSI, and we want to avoid the quirk warning,
- * so we call enable_msi only for PCIe. If we do end up needing
- * pci_enable_msi at some point in the future for HT, we'll move the
- * call back into the main init_one code.
- * We save the msi lo and hi values, so we can restore them after
- * chip reset (the kernel PCI infrastructure doesn't yet handle that
- * correctly).
- */
-static int ipath_setup_7220_config(struct ipath_devdata *dd,
- struct pci_dev *pdev)
-{
- int pos, ret = -1;
- u32 boardrev;
-
- dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
- if (!strcmp(int_type, "force_msi") || !strcmp(int_type, "auto"))
- ret = pci_enable_msi(pdev);
- if (ret) {
- if (!strcmp(int_type, "force_msi")) {
- ipath_dev_err(dd, "pci_enable_msi failed: %d, "
- "force_msi is on, so not continuing.\n",
- ret);
- return ret;
- }
-
- ipath_enable_intx(pdev);
- if (!strcmp(int_type, "auto"))
- ipath_dev_err(dd, "pci_enable_msi failed: %d, "
- "falling back to INTx\n", ret);
- } else if (pos) {
- u16 control;
- pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
- &dd->ipath_msi_lo);
- pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
- &dd->ipath_msi_hi);
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS,
- &control);
- /* now save the data (vector) info */
- pci_read_config_word(pdev,
- pos + ((control & PCI_MSI_FLAGS_64BIT)
- ? PCI_MSI_DATA_64 :
- PCI_MSI_DATA_32),
- &dd->ipath_msi_data);
- } else
- ipath_dev_err(dd, "Can't find MSI capability, "
- "can't save MSI settings for reset\n");
-
- dd->ipath_irq = pdev->irq;
-
- /*
- * We save the cachelinesize also, although it doesn't
- * really matter.
- */
- pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
- &dd->ipath_pci_cacheline);
-
- /*
- * this function is called early, before ipath_boardrev is set. It's too
- * early in init to use ipath_read_kreg64(), so use readq() instead
- */
- boardrev = (readq(&dd->ipath_kregbase[dd->ipath_kregs->kr_revision])
- >> INFINIPATH_R_BOARDID_SHIFT) & INFINIPATH_R_BOARDID_MASK;
-
- ipath_7220_pcie_params(dd, boardrev);
-
- dd->ipath_flags |= IPATH_NODMA_RTAIL | IPATH_HAS_SEND_DMA |
- IPATH_HAS_PBC_CNT | IPATH_HAS_THRESH_UPDATE;
- dd->ipath_pioupd_thresh = 4U; /* set default update threshold */
- return 0;
-}
-
-static void ipath_init_7220_variables(struct ipath_devdata *dd)
-{
- /*
- * setup the register offsets, since they are different for each
- * chip
- */
- dd->ipath_kregs = &ipath_7220_kregs;
- dd->ipath_cregs = &ipath_7220_cregs;
-
- /*
- * bits for selecting i2c direction and values,
- * used for I2C serial flash
- */
- dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
- dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
- dd->ipath_gpio_sda = IPATH_GPIO_SDA;
- dd->ipath_gpio_scl = IPATH_GPIO_SCL;
-
- /*
- * Fill in data for field-values that change in IBA7220.
- * We dynamically specify only the mask for LINKTRAININGSTATE
- * and only the shift for LINKSTATE, as they are the only ones
- * that change. Also precalculate the 3 link states of interest
- * and the combined mask.
- */
- dd->ibcs_ls_shift = IBA7220_IBCS_LINKSTATE_SHIFT;
- dd->ibcs_lts_mask = IBA7220_IBCS_LINKTRAININGSTATE_MASK;
- dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
- dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
- dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
- dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
- dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
-
- /*
- * Fill in data for ibcc field-values that change in IBA7220.
- * We dynamically specify only the mask for LINKINITCMD
- * and only the shift for LINKCMD and MAXPKTLEN, as they are
- * the only ones that change.
- */
- dd->ibcc_lic_mask = IBA7220_IBCC_LINKINITCMD_MASK;
- dd->ibcc_lc_shift = IBA7220_IBCC_LINKCMD_SHIFT;
- dd->ibcc_mpl_shift = IBA7220_IBCC_MAXPKTLEN_SHIFT;
-
- /* Fill in shifts for RcvCtrl. */
- dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
- dd->ipath_r_intravail_shift = IBA7220_R_INTRAVAIL_SHIFT;
- dd->ipath_r_tailupd_shift = IBA7220_R_TAILUPD_SHIFT;
- dd->ipath_r_portcfg_shift = IBA7220_R_PORTCFG_SHIFT;
-
- /* variables for sanity checking interrupt and errors */
- dd->ipath_hwe_bitsextant =
- (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
- (INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
- (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
- INFINIPATH_HWE_PCIE1PLLFAILED |
- INFINIPATH_HWE_PCIE0PLLFAILED |
- INFINIPATH_HWE_PCIEPOISONEDTLP |
- INFINIPATH_HWE_PCIECPLTIMEOUT |
- INFINIPATH_HWE_PCIEBUSPARITYXTLH |
- INFINIPATH_HWE_PCIEBUSPARITYXADM |
- INFINIPATH_HWE_PCIEBUSPARITYRADM |
- INFINIPATH_HWE_MEMBISTFAILED |
- INFINIPATH_HWE_COREPLL_FBSLIP |
- INFINIPATH_HWE_COREPLL_RFSLIP |
- INFINIPATH_HWE_SERDESPLLFAILED |
- INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
- INFINIPATH_HWE_IBCBUSFRSPCPARITYERR |
- INFINIPATH_HWE_PCIECPLDATAQUEUEERR |
- INFINIPATH_HWE_PCIECPLHDRQUEUEERR |
- INFINIPATH_HWE_SDMAMEMREADERR |
- INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED |
- INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT |
- INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT |
- INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT |
- INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT |
- INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR |
- INFINIPATH_HWE_IB_UC_MEMORYPARITYERR |
- INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR |
- INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR;
- dd->ipath_i_bitsextant =
- INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED |
- (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
- (INFINIPATH_I_RCVAVAIL_MASK <<
- INFINIPATH_I_RCVAVAIL_SHIFT) |
- INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
- INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO |
- INFINIPATH_I_JINT | INFINIPATH_I_SERDESTRIMDONE;
- dd->ipath_e_bitsextant =
- INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
- INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
- INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
- INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
- INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
- INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
- INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
- INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
- INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
- INFINIPATH_E_SENDSPECIALTRIGGER |
- INFINIPATH_E_SDMADISABLED | INFINIPATH_E_SMINPKTLEN |
- INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNDERRUN |
- INFINIPATH_E_SPKTLEN | INFINIPATH_E_SDROPPEDSMPPKT |
- INFINIPATH_E_SDROPPEDDATAPKT |
- INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
- INFINIPATH_E_SUNSUPVL | INFINIPATH_E_SENDBUFMISUSE |
- INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND |
- INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE |
- INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG |
- INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW |
- INFINIPATH_E_SDMAUNEXPDATA |
- INFINIPATH_E_IBSTATUSCHANGED | INFINIPATH_E_INVALIDADDR |
- INFINIPATH_E_RESET | INFINIPATH_E_HARDWARE |
- INFINIPATH_E_SDMADESCADDRMISALIGN |
- INFINIPATH_E_INVALIDEEPCMD;
-
- dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
- dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
- dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
- dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
- dd->ipath_flags |= IPATH_INTREG_64 | IPATH_HAS_MULT_IB_SPEED
- | IPATH_HAS_LINK_LATENCY;
-
- /*
- * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
- * 2 is Some Misc, 3 is reserved for future.
- */
- dd->ipath_eep_st_masks[0].hwerrs_to_log =
- INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
-
- dd->ipath_eep_st_masks[1].hwerrs_to_log =
- INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
-
- dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
-
- ipath_linkrecovery = 0;
-
- init_waitqueue_head(&dd->ipath_autoneg_wait);
- INIT_DELAYED_WORK(&dd->ipath_autoneg_work, autoneg_work);
-
- dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
- dd->ipath_link_speed_supported = IPATH_IB_SDR | IPATH_IB_DDR;
-
- dd->ipath_link_width_enabled = dd->ipath_link_width_supported;
- dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
- /*
- * set the initial values to reasonable default, will be set
- * for real when link is up.
- */
- dd->ipath_link_width_active = IB_WIDTH_4X;
- dd->ipath_link_speed_active = IPATH_IB_SDR;
- dd->delay_mult = rate_to_delay[0][1];
-}
-
-
-/*
- * Setup the MSI stuff again after a reset. I'd like to just call
- * pci_enable_msi() and request_irq() again, but when I do that,
- * the MSI enable bit doesn't get set in the command word, and
- * we switch to a different interrupt vector, which is confusing,
- * so I instead just do it all inline. Perhaps we can somehow tie this
- * into the PCIe hotplug support at some point.
- * Note, because I'm doing it all here, I don't call pci_disable_msi()
- * or free_irq() at the start of ipath_setup_7220_reset().
- */
-static int ipath_reinit_msi(struct ipath_devdata *dd)
-{
- int ret = 0;
-
- int pos;
- u16 control;
- if (!dd->ipath_msi_lo) /* Using intX, or init problem */
- goto bail;
-
- pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
- if (!pos) {
- ipath_dev_err(dd, "Can't find MSI capability, "
- "can't restore MSI settings\n");
- goto bail;
- }
- ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
- dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
- dd->ipath_msi_lo);
- ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
- dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
- dd->ipath_msi_hi);
- pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
- if (!(control & PCI_MSI_FLAGS_ENABLE)) {
- ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
- "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
- control, control | PCI_MSI_FLAGS_ENABLE);
- control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
- control);
- }
- /* now rewrite the data (vector) info */
- pci_write_config_word(dd->pcidev, pos +
- ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
- dd->ipath_msi_data);
- ret = 1;
-
-bail:
- if (!ret) {
- ipath_dbg("Using INTx, MSI disabled or not configured\n");
- ipath_enable_intx(dd->pcidev);
- ret = 1;
- }
- /*
- * We restore the cachelinesize also, although it doesn't really
- * matter.
- */
- pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
- dd->ipath_pci_cacheline);
- /* and now set the pci master bit again */
- pci_set_master(dd->pcidev);
-
- return ret;
-}
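
As a small aside on the save/restore above: the MSI data (vector) word sits at offset 12 from the capability when 64-bit MSI addresses are in use, and at offset 8 otherwise, selected by the 64BIT flag in the MSI control word. A stand-alone sketch of that offset selection, with a placeholder flag value rather than the kernel's PCI_MSI_FLAGS_64BIT:

#include <stdio.h>

#define MSI_FLAGS_64BIT 0x0080 /* placeholder for PCI_MSI_FLAGS_64BIT */

/* Offset of the MSI data register, relative to the start of config space. */
static unsigned msi_data_offset(unsigned cap_pos, unsigned short control)
{
        return cap_pos + ((control & MSI_FLAGS_64BIT) ? 12 : 8);
}

int main(void)
{
        printf("32-bit MSI data at 0x%x, 64-bit MSI data at 0x%x\n",
               msi_data_offset(0x50, 0), msi_data_offset(0x50, MSI_FLAGS_64BIT));
        return 0;
}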
-
-/*
- * This routine sleeps, so it can only be called from user context, not
- * from interrupt context. If we need interrupt context, we can split
- * it into two routines.
- */
-static int ipath_setup_7220_reset(struct ipath_devdata *dd)
-{
- u64 val;
- int i;
- int ret;
- u16 cmdval;
-
- pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval);
-
- /* Use dev_err so it shows up in logs, etc. */
- ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
-
- /* keep chip from being accessed in a few places */
- dd->ipath_flags &= ~(IPATH_INITTED | IPATH_PRESENT);
- val = dd->ipath_control | INFINIPATH_C_RESET;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
- mb();
-
- for (i = 1; i <= 5; i++) {
- int r;
-
- /*
- * Allow MBIST, etc. to complete; longer on each retry.
- * We sometimes get machine checks from bus timeout if no
- * response, so for now, make it *really* long.
- */
- msleep(1000 + (1 + i) * 2000);
- r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
- dd->ipath_pcibar0);
- if (r)
- ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
- r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
- dd->ipath_pcibar1);
- if (r)
- ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
- /* now re-enable memory access */
- pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval);
- r = pci_enable_device(dd->pcidev);
- if (r)
- ipath_dev_err(dd, "pci_enable_device failed after "
- "reset: %d\n", r);
- /*
- * whether it fully enabled or not, mark as present,
- * again (but not INITTED)
- */
- dd->ipath_flags |= IPATH_PRESENT;
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
- if (val == dd->ipath_revision) {
- ipath_cdbg(VERBOSE, "Got matching revision "
- "register %llx on try %d\n",
- (unsigned long long) val, i);
- ret = ipath_reinit_msi(dd);
- goto bail;
- }
- /* Probably getting -1 back */
- ipath_dbg("Didn't get expected revision register, "
- "got %llx, try %d\n", (unsigned long long) val,
- i + 1);
- }
- ret = 0; /* failed */
-
-bail:
- if (ret)
- ipath_7220_pcie_params(dd, dd->ipath_boardrev);
-
- return ret;
-}
-
-/**
- * ipath_7220_put_tid - write a TID to the chip
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED
- * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for selection of the
- * appropriate "flavor". The static calls in cleanup just use the
- * revision-agnostic form, as they are not performance critical.
- */
-static void ipath_7220_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- if (pa != dd->ipath_tidinvalid) {
- u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
-
- /* paranoia checks */
- if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
- dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
- "not 2KB aligned!\n", pa);
- return;
- }
- if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
- ipath_dev_err(dd,
- "BUG: Physical page address 0x%lx "
- "larger than supported\n", pa);
- return;
- }
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- chippa |= dd->ipath_tidtemplate;
- else /* for now, always full 4KB page */
- chippa |= IBA7220_TID_SZ_4K;
- writeq(chippa, tidptr);
- } else
- writeq(pa, tidptr);
- mmiowb();
-}
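
For illustration, a stand-alone sketch of the TID packing done above: the buffer's physical address is stored right-shifted by the chip's PA shift (so it must be 2KB aligned), with buffer-size bits OR'd into the word. The shift and size encodings below are hypothetical placeholders, not the IBA7220 defines.

#include <stdint.h>
#include <stdio.h>

#define TID_PA_SHIFT 11           /* hypothetical: TIDs hold 2KB-aligned physical addresses */
#define TID_SZ_2K    (0ULL << 37) /* hypothetical size encodings */
#define TID_SZ_4K    (1ULL << 37)

static uint64_t pack_tid(uint64_t pa, int use_4k)
{
        uint64_t chippa = pa >> TID_PA_SHIFT;

        return chippa | (use_4k ? TID_SZ_4K : TID_SZ_2K);
}

int main(void)
{
        /* a 2KB-aligned physical address, packed as a 4KB buffer entry */
        printf("0x%llx\n", (unsigned long long)pack_tid(0x12345800ULL, 1));
        return 0;
}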
-
-/**
- * ipath_7220_clear_tids - clear all TID entries for a port, expected and eager
- * @dd: the infinipath device
- * @port: the port
- *
- * clear all TID entries for a port, expected and eager.
- * Used from ipath_close(). On this chip, TIDs are only 32 bits,
- * not 64, but they are still on 64 bit boundaries, so tidbase
- * is declared as u64 * for the pointer math, even though we write 32 bits
- */
-static void ipath_7220_clear_tids(struct ipath_devdata *dd, unsigned port)
-{
- u64 __iomem *tidbase;
- unsigned long tidinv;
- int i;
-
- if (!dd->ipath_kregbase)
- return;
-
- ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
-
- tidinv = dd->ipath_tidinvalid;
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->ipath_kregbase) +
- dd->ipath_rcvtidbase +
- port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->ipath_rcvtidcnt; i++)
- ipath_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
- tidinv);
-
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->ipath_kregbase) +
- dd->ipath_rcvegrbase + port_egrtid_idx(dd, port)
- * sizeof(*tidbase));
-
- for (i = port ? dd->ipath_rcvegrcnt : dd->ipath_p0_rcvegrcnt; i; i--)
- ipath_7220_put_tid(dd, &tidbase[i-1], RCVHQ_RCV_TYPE_EAGER,
- tidinv);
-}
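
A brief sketch of the per-port TID base arithmetic used above: each port's expected-TID array starts at the chip's TID region plus port times the per-port TID count, in 64-bit slots. The example offsets and counts are made up.

#include <stdint.h>
#include <stdio.h>

static uintptr_t tidbase_for_port(uintptr_t kregbase, uintptr_t rcvtidbase,
                                  unsigned rcvtidcnt, unsigned port)
{
        return kregbase + rcvtidbase + (uintptr_t)port * rcvtidcnt * sizeof(uint64_t);
}

int main(void)
{
        /* e.g. TID region at chip offset 0x20000, 2048 expected TIDs per port */
        printf("port 3 TID base offset: 0x%lx\n",
               (unsigned long)tidbase_for_port(0, 0x20000, 2048, 3));
        return 0;
}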
-
-/**
- * ipath_7220_tidtemplate - setup constants for TID updates
- * @dd: the infinipath device
- *
- * We set up values we use a lot, to avoid recalculating them each time
- */
-static void ipath_7220_tidtemplate(struct ipath_devdata *dd)
-{
- /* For now, we always allocate 4KB buffers (at init) so we can
- * receive max size packets. We may want a module parameter to
- * specify 2KB or 4KB and/or make it per port instead of per device
- * for those who want to reduce memory footprint. Note that the
- * ipath_rcvhdrentsize size must be large enough to hold the largest
- * IB header (currently 96 bytes) that we expect to handle (plus of
- * course the 2 dwords of RHF).
- */
- if (dd->ipath_rcvegrbufsize == 2048)
- dd->ipath_tidtemplate = IBA7220_TID_SZ_2K;
- else if (dd->ipath_rcvegrbufsize == 4096)
- dd->ipath_tidtemplate = IBA7220_TID_SZ_4K;
- else {
- dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
- "%u, using %u\n", dd->ipath_rcvegrbufsize,
- 4096);
- dd->ipath_tidtemplate = IBA7220_TID_SZ_4K;
- }
- dd->ipath_tidinvalid = 0;
-}
-
-static int ipath_7220_early_init(struct ipath_devdata *dd)
-{
- u32 i, s;
-
- if (strcmp(int_type, "auto") &&
- strcmp(int_type, "force_msi") &&
- strcmp(int_type, "force_intx")) {
- ipath_dev_err(dd, "Invalid interrupt_type: '%s', expecting "
- "auto, force_msi or force_intx\n", int_type);
- return -EINVAL;
- }
-
- /*
- * Control[4] has been added to change the arbitration within
- * the SDMA engine between data fetches and descriptor fetches.
- * ipath_sdma_fetch_arb==0 gives data fetches priority.
- */
- if (ipath_sdma_fetch_arb && (dd->ipath_minrev > 1))
- dd->ipath_control |= 1<<4;
-
- dd->ipath_flags |= IPATH_4BYTE_TID;
-
- /*
- * For openfabrics, we need to be able to handle an IB header of
- * 24 dwords. HT chip has arbitrary sized receive buffers, so we
- * made them the same size as the PIO buffers. This chip does not
- * handle arbitrary size buffers, so we need the header large enough
- * to handle largest IB header, but still have room for a 2KB MTU
- * standard IB packet.
- */
- dd->ipath_rcvhdrentsize = 24;
- dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
- dd->ipath_rhf_offset =
- dd->ipath_rcvhdrentsize - sizeof(u64) / sizeof(u32);
-
- dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048;
- /*
- * the min() check here is currently a nop, but it may not always
- * be, depending on just how we do ipath_rcvegrbufsize
- */
- dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k :
- dd->ipath_piosize2k,
- dd->ipath_rcvegrbufsize +
- (dd->ipath_rcvhdrentsize << 2));
- dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
-
- ipath_7220_config_jint(dd, INFINIPATH_JINT_DEFAULT_IDLE_TICKS,
- INFINIPATH_JINT_DEFAULT_MAX_PACKETS);
-
- if (dd->ipath_boardrev) /* no eeprom on emulator */
- ipath_get_eeprom_info(dd);
-
- /* start of code to check and print procmon */
- s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
- s &= ~(1U<<31); /* clear done bit */
- s |= 1U<<14; /* clear counter (write 1 to clear) */
- ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
- /* make sure clear_counter low long enough before start */
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-
- s &= ~(1U<<14); /* allow counter to count (before starting) */
- ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
-
- s |= 1U<<15; /* start the counter */
- s &= ~(1U<<31); /* clear done bit */
- s &= ~0x7ffU; /* clear frequency bits */
- s |= 0xe29; /* set frequency bits, in case cleared */
- ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
-
- s = 0;
- for (i = 500; i > 0 && !(s&(1ULL<<31)); i--) {
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
- }
- if (!(s&(1U<<31)))
- ipath_dev_err(dd, "ProcMon register not valid: 0x%x\n", s);
- else
- ipath_dbg("ProcMon=0x%x, count=0x%x\n", s, (s>>16)&0x1ff);
-
- return 0;
-}
-
-/**
- * ipath_7220_get_base_info - set chip-specific flags for user code
- * @pd: the infinipath port
- * @kbase: ipath_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-static int ipath_7220_get_base_info(struct ipath_portdata *pd, void *kbase)
-{
- struct ipath_base_info *kinfo = kbase;
-
- kinfo->spi_runtime_flags |=
- IPATH_RUNTIME_PCIE | IPATH_RUNTIME_NODMA_RTAIL |
- IPATH_RUNTIME_SDMA;
-
- return 0;
-}
-
-static void ipath_7220_free_irq(struct ipath_devdata *dd)
-{
- free_irq(dd->ipath_irq, dd);
- dd->ipath_irq = 0;
-}
-
-static struct ipath_message_header *
-ipath_7220_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
-{
- u32 offset = ipath_hdrget_offset(rhf_addr);
-
- return (struct ipath_message_header *)
- (rhf_addr - dd->ipath_rhf_offset + offset);
-}
-
-static void ipath_7220_config_ports(struct ipath_devdata *dd, ushort cfgports)
-{
- u32 nchipports;
-
- nchipports = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
- if (!cfgports) {
- int ncpus = num_online_cpus();
-
- if (ncpus <= 4)
- dd->ipath_portcnt = 5;
- else if (ncpus <= 8)
- dd->ipath_portcnt = 9;
- if (dd->ipath_portcnt)
- ipath_dbg("Auto-configured for %u ports, %d cpus "
- "online\n", dd->ipath_portcnt, ncpus);
- } else if (cfgports <= nchipports)
- dd->ipath_portcnt = cfgports;
- if (!dd->ipath_portcnt) /* none of the above, set to max */
- dd->ipath_portcnt = nchipports;
- /*
- * chip can be configured for 5, 9, or 17 ports, and choice
- * affects number of eager TIDs per port (1K, 2K, 4K).
- */
- if (dd->ipath_portcnt > 9)
- dd->ipath_rcvctrl |= 2ULL << IBA7220_R_PORTCFG_SHIFT;
- else if (dd->ipath_portcnt > 5)
- dd->ipath_rcvctrl |= 1ULL << IBA7220_R_PORTCFG_SHIFT;
- /* else configure for default 5 receive ports */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- dd->ipath_rcvctrl);
- dd->ipath_p0_rcvegrcnt = 2048; /* always */
- if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
- dd->ipath_pioreserved = 3; /* kpiobufs used for PIO */
-}
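
As a stand-alone illustration of the auto-configuration above: the port count is picked from the number of online CPUs (5, 9, or the chip maximum), and the receive-control PORTCFG field encodes that choice as 0, 1 or 2. The shift below is a placeholder, not the real IBA7220_R_PORTCFG_SHIFT.

#include <stdio.h>

#define R_PORTCFG_SHIFT 20 /* placeholder shift for the PORTCFG field */

static unsigned pick_portcnt(int ncpus, unsigned nchipports)
{
        if (ncpus <= 4)
                return 5;
        if (ncpus <= 8)
                return 9;
        return nchipports; /* none of the above: use the chip maximum */
}

static unsigned long long portcfg_bits(unsigned portcnt)
{
        unsigned cfg = (portcnt > 9) ? 2 : (portcnt > 5) ? 1 : 0;

        return (unsigned long long)cfg << R_PORTCFG_SHIFT;
}

int main(void)
{
        unsigned cnt = pick_portcnt(16, 17);

        printf("%u ports, rcvctrl portcfg bits 0x%llx\n", cnt, portcfg_bits(cnt));
        return 0;
}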
-
-
-static int ipath_7220_get_ib_cfg(struct ipath_devdata *dd, int which)
-{
- int lsb, ret = 0;
- u64 maskr; /* right-justified mask */
-
- switch (which) {
- case IPATH_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
- lsb = IBA7220_IBC_HRTBT_SHIFT;
- maskr = IBA7220_IBC_HRTBT_MASK;
- break;
-
- case IPATH_IB_CFG_LWID_ENB: /* Get allowed Link-width */
- ret = dd->ipath_link_width_enabled;
- goto done;
-
- case IPATH_IB_CFG_LWID: /* Get currently active Link-width */
- ret = dd->ipath_link_width_active;
- goto done;
-
- case IPATH_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
- ret = dd->ipath_link_speed_enabled;
- goto done;
-
- case IPATH_IB_CFG_SPD: /* Get current Link spd */
- ret = dd->ipath_link_speed_active;
- goto done;
-
- case IPATH_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
- lsb = IBA7220_IBC_RXPOL_SHIFT;
- maskr = IBA7220_IBC_RXPOL_MASK;
- break;
-
- case IPATH_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
- lsb = IBA7220_IBC_LREV_SHIFT;
- maskr = IBA7220_IBC_LREV_MASK;
- break;
-
- case IPATH_IB_CFG_LINKLATENCY:
- ret = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrstatus)
- & IBA7220_DDRSTAT_LINKLAT_MASK;
- goto done;
-
- default:
- ret = -ENOTSUPP;
- goto done;
- }
- ret = (int)((dd->ipath_ibcddrctrl >> lsb) & maskr);
-done:
- return ret;
-}
-
-static int ipath_7220_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
-{
- int lsb, ret = 0, setforce = 0;
- u64 maskr; /* right-justified mask */
-
- switch (which) {
- case IPATH_IB_CFG_LIDLMC:
- /*
- * Set LID and LMC. Combined to avoid possible hazard;
- * caller puts LMC in 16 MSbits, DLID in 16 LSbits of val.
- */
- lsb = IBA7220_IBC_DLIDLMC_SHIFT;
- maskr = IBA7220_IBC_DLIDLMC_MASK;
- break;
-
- case IPATH_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
- if (val & IPATH_IB_HRTBT_ON &&
- (dd->ipath_flags & IPATH_NO_HRTBT))
- goto bail;
- lsb = IBA7220_IBC_HRTBT_SHIFT;
- maskr = IBA7220_IBC_HRTBT_MASK;
- break;
-
- case IPATH_IB_CFG_LWID_ENB: /* set allowed Link-width */
- /*
- * As with speed, only write the actual register if
- * the link is currently down, otherwise takes effect
- * on next link change.
- */
- dd->ipath_link_width_enabled = val;
- if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) !=
- IPATH_LINKDOWN)
- goto bail;
- /*
- * We set the IPATH_IB_FORCE_NOTIFY bit so updown
- * will get called because we want to update
- * link_width_active, and the change may not take
- * effect for some time (if we are in POLL), so this
- * flag will force the updown routine to be called
- * on the next ibstatuschange down interrupt, even
- * if it's not a down->up transition.
- */
- val--; /* convert from IB to chip */
- maskr = IBA7220_IBC_WIDTH_MASK;
- lsb = IBA7220_IBC_WIDTH_SHIFT;
- setforce = 1;
- dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY;
- break;
-
- case IPATH_IB_CFG_SPD_ENB: /* set allowed Link speeds */
- /*
- * If we turn off IB1.2, need to preset SerDes defaults,
- * but not right now. Set a flag for the next time
- * we command the link down. As with width, only write the
- * actual register if the link is currently down, otherwise
- * takes effect on next link change. Since setting is being
- * explicitly requested (via MAD or sysfs), clear autoneg
- * failure status if speed autoneg is enabled.
- */
- dd->ipath_link_speed_enabled = val;
- if (dd->ipath_ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK &&
- !(val & (val - 1)))
- dd->ipath_presets_needed = 1;
- if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) !=
- IPATH_LINKDOWN)
- goto bail;
- /*
- * We set the IPATH_IB_FORCE_NOTIFY bit so updown
- * will get called because we want to update
- * link_speed_active, and the change may not take
- * effect for some time (if we are in POLL), so this
- * flag will force the updown routine to be called
- * on the next ibstatuschange down interrupt, even
- * if it's not a down->up transition. When setting
- * speed autoneg, clear AUTONEG_FAILED.
- */
- if (val == (IPATH_IB_SDR | IPATH_IB_DDR)) {
- val = IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED;
- } else
- val = val == IPATH_IB_DDR ? IBA7220_IBC_SPEED_DDR
- : IBA7220_IBC_SPEED_SDR;
- maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- lsb = 0; /* speed bits are low bits */
- setforce = 1;
- break;
-
- case IPATH_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
- lsb = IBA7220_IBC_RXPOL_SHIFT;
- maskr = IBA7220_IBC_RXPOL_MASK;
- break;
-
- case IPATH_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
- lsb = IBA7220_IBC_LREV_SHIFT;
- maskr = IBA7220_IBC_LREV_MASK;
- break;
-
- default:
- ret = -ENOTSUPP;
- goto bail;
- }
- dd->ipath_ibcddrctrl &= ~(maskr << lsb);
- dd->ipath_ibcddrctrl |= (((u64) val & maskr) << lsb);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
- dd->ipath_ibcddrctrl);
- if (setforce)
- dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY;
-bail:
- return ret;
-}
-
-static void ipath_7220_read_counters(struct ipath_devdata *dd,
- struct infinipath_counters *cntrs)
-{
- u64 *counters = (u64 *) cntrs;
- int i;
-
- for (i = 0; i < sizeof(*cntrs) / sizeof(u64); i++)
- counters[i] = ipath_snap_cntr(dd, i);
-}
-
-/* if we are using MSI, try to fallback to INTx */
-static int ipath_7220_intr_fallback(struct ipath_devdata *dd)
-{
- if (dd->ipath_msi_lo) {
- dev_info(&dd->pcidev->dev, "MSI interrupt not detected,"
- " trying INTx interrupts\n");
- ipath_7220_nomsi(dd);
- ipath_enable_intx(dd->pcidev);
- /*
- * some newer kernels require free_irq before disable_msi,
- * and the irq can be changed during disable and INTx enable,
- * so we therefore need to use the pcidev->irq value,
- * not our saved MSI value.
- */
- dd->ipath_irq = dd->pcidev->irq;
- if (request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
- IPATH_DRV_NAME, dd))
- ipath_dev_err(dd,
- "Could not re-request_irq for INTx\n");
- return 1;
- }
- return 0;
-}
-
-/*
- * reset the XGXS (between serdes and IBC). Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining. To do this right, we reset IBC
- * as well.
- */
-static void ipath_7220_xgxs_reset(struct ipath_devdata *dd)
-{
- u64 val, prev_val;
-
- prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- val = prev_val | INFINIPATH_XGXS_RESET;
- prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control);
-}
-
-
-/* Still needs cleanup, too much hardwired stuff */
-static void autoneg_send(struct ipath_devdata *dd,
- u32 *hdr, u32 dcnt, u32 *data)
-{
- int i;
- u64 cnt;
- u32 __iomem *piobuf;
- u32 pnum;
-
- i = 0;
- cnt = 7 + dcnt + 1; /* 7 dword header, dcnt dwords of data, 1 dword icrc */
- while (!(piobuf = ipath_getpiobuf(dd, cnt, &pnum))) {
- if (i++ > 15) {
- ipath_dbg("Couldn't get pio buffer for send\n");
- return;
- }
- udelay(2);
- }
- if (dd->ipath_flags&IPATH_HAS_PBC_CNT)
- cnt |= 0x80000000UL<<32; /* mark as VL15 */
- writeq(cnt, piobuf);
- ipath_flush_wc();
- __iowrite32_copy(piobuf + 2, hdr, 7);
- __iowrite32_copy(piobuf + 9, data, dcnt);
- ipath_flush_wc();
-}
-
-/*
- * _start packet gets sent twice at start, _done gets sent twice at end
- */
-static void ipath_autoneg_send(struct ipath_devdata *dd, int which)
-{
- static u32 swapped;
- u32 dw, i, hcnt, dcnt, *data;
- static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
- static u32 madpayload_start[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
- };
- static u32 madpayload_done[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x40000001, 0x1388, 0x15e, /* rest 0's */
- };
- dcnt = ARRAY_SIZE(madpayload_start);
- hcnt = ARRAY_SIZE(hdr);
- if (!swapped) {
- /* for maintainability, do it at runtime */
- for (i = 0; i < hcnt; i++) {
- dw = (__force u32) cpu_to_be32(hdr[i]);
- hdr[i] = dw;
- }
- for (i = 0; i < dcnt; i++) {
- dw = (__force u32) cpu_to_be32(madpayload_start[i]);
- madpayload_start[i] = dw;
- dw = (__force u32) cpu_to_be32(madpayload_done[i]);
- madpayload_done[i] = dw;
- }
- swapped = 1;
- }
-
- data = which ? madpayload_done : madpayload_start;
- ipath_cdbg(PKT, "Sending %s special MADs\n", which?"done":"start");
-
- autoneg_send(dd, hdr, dcnt, data);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- udelay(2);
- autoneg_send(dd, hdr, dcnt, data);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- udelay(2);
-}
-
-
-
-/*
- * Do the absolute minimum to cause an IB speed change, and make it
- * ready, but don't actually trigger the change. The caller will
- * do that when ready (if link is in Polling training state, it will
- * happen immediately, otherwise when link next goes down)
- *
- * This routine should only be used as part of the DDR autonegotiation
- * code for devices that are not compliant with IB 1.2 (or code that
- * fixes things up for same).
- *
- * When the link has gone down and autoneg is enabled, or autoneg has
- * failed and we give up until next time we set both speeds, and
- * then we want IBTA enabled as well as "use max enabled speed".
- */
-static void set_speed_fast(struct ipath_devdata *dd, u32 speed)
-{
- dd->ipath_ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK |
- (IBA7220_IBC_WIDTH_MASK << IBA7220_IBC_WIDTH_SHIFT));
-
- if (speed == (IPATH_IB_SDR | IPATH_IB_DDR))
- dd->ipath_ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- else
- dd->ipath_ibcddrctrl |= speed == IPATH_IB_DDR ?
- IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
-
- /*
- * Convert from IB-style 1 = 1x, 2 = 4x, 3 = auto
- * to chip-centric 0 = 1x, 1 = 4x, 2 = auto
- */
- dd->ipath_ibcddrctrl |= (u64)(dd->ipath_link_width_enabled - 1) <<
- IBA7220_IBC_WIDTH_SHIFT;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
- dd->ipath_ibcddrctrl);
- ipath_cdbg(VERBOSE, "setup for IB speed (%x) done\n", speed);
-}
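
A small sketch of the enabled-speed-to-register mapping performed above (and in ipath_7220_set_ib_cfg()): when both SDR and DDR are enabled, autoneg plus the IBTA 1.2 bit is selected, otherwise the single fixed speed. The bit values here are placeholders, not the IBA7220 register defines.

#include <stdio.h>

#define IB_SDR 1
#define IB_DDR 2

#define IBC_SPEED_SDR     0x1ULL /* placeholder encodings */
#define IBC_SPEED_DDR     0x2ULL
#define IBC_SPEED_AUTONEG 0x4ULL
#define IBC_IBTA_1_2      0x8ULL

static unsigned long long speed_bits(unsigned speed_enabled)
{
        if (speed_enabled == (IB_SDR | IB_DDR))
                return IBC_SPEED_AUTONEG | IBC_IBTA_1_2;
        return (speed_enabled == IB_DDR) ? IBC_SPEED_DDR : IBC_SPEED_SDR;
}

int main(void)
{
        printf("SDR=0x%llx DDR=0x%llx both=0x%llx\n",
               speed_bits(IB_SDR), speed_bits(IB_DDR), speed_bits(IB_SDR | IB_DDR));
        return 0;
}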
-
-
-/*
- * this routine is only used when we are not talking to another
- * IB 1.2-compliant device that we think can do DDR.
- * (This includes all existing switch chips as of Oct 2007.)
- * 1.2-compliant devices go directly to DDR prior to reaching INIT
- */
-static void try_auto_neg(struct ipath_devdata *dd)
-{
- /*
- * required for older non-IB1.2 DDR switches. Newer
- * non-IB-compliant switches don't need it, but so far,
- * aren't bothered by it either. "Magic constant"
- */
- ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl),
- 0x3b9dc07);
- dd->ipath_flags |= IPATH_IB_AUTONEG_INPROG;
- ipath_autoneg_send(dd, 0);
- set_speed_fast(dd, IPATH_IB_DDR);
- ipath_toggle_rclkrls(dd);
- /* 2 msec is minimum length of a poll cycle */
- schedule_delayed_work(&dd->ipath_autoneg_work,
- msecs_to_jiffies(2));
-}
-
-
-static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
-{
- int ret = 0, symadj = 0;
- u32 ltstate = ipath_ib_linkstate(dd, ibcs);
-
- dd->ipath_link_width_active =
- ((ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1) ?
- IB_WIDTH_4X : IB_WIDTH_1X;
- dd->ipath_link_speed_active =
- ((ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1) ?
- IPATH_IB_DDR : IPATH_IB_SDR;
-
- if (!ibup) {
- /*
- * when link goes down we don't want aeq running, so it
- * won't interfere with IBC training, etc., and we need
- * to go back to the static SerDes preset values
- */
- if (dd->ipath_x1_fix_tries &&
- ltstate <= INFINIPATH_IBCS_LT_STATE_SLEEPQUIET &&
- ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP)
- dd->ipath_x1_fix_tries = 0;
- if (!(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED |
- IPATH_IB_AUTONEG_INPROG)))
- set_speed_fast(dd, dd->ipath_link_speed_enabled);
- if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) {
- ipath_cdbg(VERBOSE, "Setting RXEQ defaults\n");
- ipath_sd7220_presets(dd);
- }
- /* this might be better in ipath_sd7220_presets() */
- ipath_set_relock_poll(dd, ibup);
- } else {
- if (ipath_compat_ddr_negotiate &&
- !(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED |
- IPATH_IB_AUTONEG_INPROG)) &&
- dd->ipath_link_speed_active == IPATH_IB_SDR &&
- (dd->ipath_link_speed_enabled &
- (IPATH_IB_DDR | IPATH_IB_SDR)) ==
- (IPATH_IB_DDR | IPATH_IB_SDR) &&
- dd->ipath_autoneg_tries < IPATH_AUTONEG_TRIES) {
- /* we are SDR, and DDR auto-negotiation enabled */
- ++dd->ipath_autoneg_tries;
- ipath_dbg("DDR negotiation try, %u/%u\n",
- dd->ipath_autoneg_tries,
- IPATH_AUTONEG_TRIES);
- if (!dd->ibdeltainprog) {
- dd->ibdeltainprog = 1;
- dd->ibsymsnap = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt);
- dd->iblnkerrsnap = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt);
- }
- try_auto_neg(dd);
- ret = 1; /* no other IB status change processing */
- } else if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)
- && dd->ipath_link_speed_active == IPATH_IB_SDR) {
- ipath_autoneg_send(dd, 1);
- set_speed_fast(dd, IPATH_IB_DDR);
- udelay(2);
- ipath_toggle_rclkrls(dd);
- ret = 1; /* no other IB status change processing */
- } else {
- if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
- (dd->ipath_link_speed_active & IPATH_IB_DDR)) {
- ipath_dbg("Got to INIT with DDR autoneg\n");
- dd->ipath_flags &= ~(IPATH_IB_AUTONEG_INPROG
- | IPATH_IB_AUTONEG_FAILED);
- dd->ipath_autoneg_tries = 0;
- /* re-enable SDR, for next link down */
- set_speed_fast(dd,
- dd->ipath_link_speed_enabled);
- wake_up(&dd->ipath_autoneg_wait);
- symadj = 1;
- } else if (dd->ipath_flags & IPATH_IB_AUTONEG_FAILED) {
- /*
- * clear autoneg failure flag, and do setup
- * so we'll try next time link goes down and
- * back to INIT (possibly connected to different
- * device).
- */
- ipath_dbg("INIT %sDR after autoneg failure\n",
- (dd->ipath_link_speed_active &
- IPATH_IB_DDR) ? "D" : "S");
- dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED;
- dd->ipath_ibcddrctrl |=
- IBA7220_IBC_IBTA_1_2_MASK;
- ipath_write_kreg(dd,
- IPATH_KREG_OFFSET(IBNCModeCtrl), 0);
- symadj = 1;
- }
- }
- /*
- * if we are in 1X on rev1 only, and are in autoneg width,
- * it could be due to an xgxs problem, so if we haven't
- * already tried, try twice to get to 4X; if we
- * tried, and couldn't, report it, since it will
- * probably not be what is desired.
- */
- if (dd->ipath_minrev == 1 &&
- (dd->ipath_link_width_enabled & (IB_WIDTH_1X |
- IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X)
- && dd->ipath_link_width_active == IB_WIDTH_1X
- && dd->ipath_x1_fix_tries < 3) {
- if (++dd->ipath_x1_fix_tries == 3) {
- dev_info(&dd->pcidev->dev,
- "IB link is in 1X mode\n");
- if (!(dd->ipath_flags &
- IPATH_IB_AUTONEG_INPROG))
- symadj = 1;
- }
- else {
- ipath_cdbg(VERBOSE, "IB 1X in "
- "auto-width, try %u to be "
- "sure it's really 1X; "
- "ltstate %u\n",
- dd->ipath_x1_fix_tries,
- ltstate);
- dd->ipath_f_xgxs_reset(dd);
- ret = 1; /* skip other processing */
- }
- } else if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG))
- symadj = 1;
-
- if (!ret) {
- dd->delay_mult = rate_to_delay
- [(ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1]
- [(ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1];
-
- ipath_set_relock_poll(dd, ibup);
- }
- }
-
- if (symadj) {
- if (dd->ibdeltainprog) {
- dd->ibdeltainprog = 0;
- dd->ibsymdelta += ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt) -
- dd->ibsymsnap;
- dd->iblnkerrdelta += ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt) -
- dd->iblnkerrsnap;
- }
- } else if (!ibup && !dd->ibdeltainprog
- && !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) {
- dd->ibdeltainprog = 1;
- dd->ibsymsnap = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt);
- dd->iblnkerrsnap = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt);
- }
-
- if (!ret)
- ipath_setup_7220_setextled(dd, ipath_ib_linkstate(dd, ibcs),
- ltstate);
- return ret;
-}
-
-
-/*
- * Handle the empirically determined mechanism for auto-negotiation
- * of DDR speed with switches.
- */
-static void autoneg_work(struct work_struct *work)
-{
- struct ipath_devdata *dd;
- u64 startms;
- u32 lastlts, i;
-
- dd = container_of(work, struct ipath_devdata,
- ipath_autoneg_work.work);
-
- startms = jiffies_to_msecs(jiffies);
-
- /*
- * busy wait for this first part, it should be at most a
- * few hundred usec, since we scheduled ourselves for 2msec.
- */
- for (i = 0; i < 25; i++) {
- lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
- if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
- ipath_set_linkstate(dd, IPATH_IB_LINKDOWN_DISABLE);
- break;
- }
- udelay(100);
- }
-
- if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG))
- goto done; /* we got there early or told to stop */
-
- /* we expect this to timeout */
- if (wait_event_timeout(dd->ipath_autoneg_wait,
- !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
- msecs_to_jiffies(90)))
- goto done;
-
- ipath_toggle_rclkrls(dd);
-
- /* we expect this to timeout */
- if (wait_event_timeout(dd->ipath_autoneg_wait,
- !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
- msecs_to_jiffies(1700)))
- goto done;
-
- set_speed_fast(dd, IPATH_IB_SDR);
- ipath_toggle_rclkrls(dd);
-
- /*
- * wait up to 250 msec for link to train and get to INIT at DDR;
- * this should terminate early
- */
- wait_event_timeout(dd->ipath_autoneg_wait,
- !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
- msecs_to_jiffies(250));
-done:
- if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
- ipath_dbg("Did not get to DDR INIT (%x) after %Lu msecs\n",
- ipath_ib_state(dd, dd->ipath_lastibcstat),
- (unsigned long long) jiffies_to_msecs(jiffies)-startms);
- dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
- if (dd->ipath_autoneg_tries == IPATH_AUTONEG_TRIES) {
- dd->ipath_flags |= IPATH_IB_AUTONEG_FAILED;
- ipath_dbg("Giving up on DDR until next IB "
- "link Down\n");
- dd->ipath_autoneg_tries = 0;
- }
- set_speed_fast(dd, dd->ipath_link_speed_enabled);
- }
-}
-
-
-/**
- * ipath_init_iba7220_funcs - set up the chip-specific function pointers
- * @dd: the infinipath device
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-void ipath_init_iba7220_funcs(struct ipath_devdata *dd)
-{
- dd->ipath_f_intrsetup = ipath_7220_intconfig;
- dd->ipath_f_bus = ipath_setup_7220_config;
- dd->ipath_f_reset = ipath_setup_7220_reset;
- dd->ipath_f_get_boardname = ipath_7220_boardname;
- dd->ipath_f_init_hwerrors = ipath_7220_init_hwerrors;
- dd->ipath_f_early_init = ipath_7220_early_init;
- dd->ipath_f_handle_hwerrors = ipath_7220_handle_hwerrors;
- dd->ipath_f_quiet_serdes = ipath_7220_quiet_serdes;
- dd->ipath_f_bringup_serdes = ipath_7220_bringup_serdes;
- dd->ipath_f_clear_tids = ipath_7220_clear_tids;
- dd->ipath_f_put_tid = ipath_7220_put_tid;
- dd->ipath_f_cleanup = ipath_setup_7220_cleanup;
- dd->ipath_f_setextled = ipath_setup_7220_setextled;
- dd->ipath_f_get_base_info = ipath_7220_get_base_info;
- dd->ipath_f_free_irq = ipath_7220_free_irq;
- dd->ipath_f_tidtemplate = ipath_7220_tidtemplate;
- dd->ipath_f_intr_fallback = ipath_7220_intr_fallback;
- dd->ipath_f_xgxs_reset = ipath_7220_xgxs_reset;
- dd->ipath_f_get_ib_cfg = ipath_7220_get_ib_cfg;
- dd->ipath_f_set_ib_cfg = ipath_7220_set_ib_cfg;
- dd->ipath_f_config_jint = ipath_7220_config_jint;
- dd->ipath_f_config_ports = ipath_7220_config_ports;
- dd->ipath_f_read_counters = ipath_7220_read_counters;
- dd->ipath_f_get_msgheader = ipath_7220_get_msgheader;
- dd->ipath_f_ib_updown = ipath_7220_ib_updown;
-
- /* initialize chip-specific variables */
- ipath_init_7220_variables(dd);
-}
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index b3d7efcdf021..6559af60bffd 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -1030,8 +1030,6 @@ void ipath_free_data(struct ipath_portdata *dd);
u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
unsigned len, int avail);
-void ipath_init_iba7220_funcs(struct ipath_devdata *);
-void ipath_init_iba6120_funcs(struct ipath_devdata *);
void ipath_init_iba6110_funcs(struct ipath_devdata *);
void ipath_get_eeprom_info(struct ipath_devdata *);
int ipath_update_eeprom_log(struct ipath_devdata *dd);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 559f39be0dcc..dd7f26d04d46 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -2182,7 +2182,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
snprintf(dev->node_desc, sizeof(dev->node_desc),
IPATH_IDSTR " %s", init_utsname()->nodename);
- ret = ib_register_device(dev);
+ ret = ib_register_device(dev, NULL);
if (ret)
goto err_reg;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 39051417054c..4e94e360e43b 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -662,7 +662,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
spin_lock_init(&ibdev->sm_lock);
mutex_init(&ibdev->cap_mask_mutex);
- if (ib_register_device(&ibdev->ib_dev))
+ if (ib_register_device(&ibdev->ib_dev, NULL))
goto err_map;
if (mlx4_ib_mad_init(ibdev))
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index f080a784bc79..1e0b4b6074ad 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1403,7 +1403,7 @@ int mthca_register_device(struct mthca_dev *dev)
mutex_init(&dev->cap_mask_mutex);
- ret = ib_register_device(&dev->ib_dev);
+ ret = ib_register_device(&dev->ib_dev, NULL);
if (ret)
return ret;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 86acb7d57064..57874a165083 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2584,7 +2584,6 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
break;
}
}
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
if (phy_data & 0x0004) {
if (wide_ppm_offset &&
@@ -2639,6 +2638,8 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
}
}
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+
nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE;
}
@@ -3422,6 +3423,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 aeq_info;
u32 next_iwarp_state = 0;
+ u32 aeqe_cq_id;
u16 async_event_id;
u8 tcp_state;
u8 iwarp_state;
@@ -3449,6 +3451,14 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
+ aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
+ if (aeq_info & NES_AEQE_QP) {
+ if ((!nes_is_resource_allocated(nesadapter, nesadapter->allocated_qps,
+ aeqe_cq_id)) ||
+ (atomic_read(&nesqp->close_timer_started)))
+ return;
+ }
+
switch (async_event_id) {
case NES_AEQE_AEID_LLP_FIN_RECEIVED:
if (nesqp->term_flags)
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index e95e8d09ff38..5cc0a9ae5bb1 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1001,6 +1001,7 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
+
static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
"Link Change Interrupts",
"Linearized SKBs",
@@ -1015,11 +1016,15 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
"Rx Jabber Errors",
"Rx Oversized Frames",
"Rx Short Frames",
+ "Rx Length Errors",
+ "Rx CRC Errors",
+ "Rx Port Discard",
"Endnode Rx Discards",
"Endnode Rx Octets",
"Endnode Rx Frames",
"Endnode Tx Octets",
"Endnode Tx Frames",
+ "Tx Errors",
"mh detected",
"mh pauses",
"Retransmission Count",
@@ -1048,19 +1053,13 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
"CM Nodes Destroyed",
"CM Accel Drops",
"CM Resets Received",
+ "Free 4Kpbls",
+ "Free 256pbls",
"Timer Inits",
- "CQ Depth 1",
- "CQ Depth 4",
- "CQ Depth 16",
- "CQ Depth 24",
- "CQ Depth 32",
- "CQ Depth 128",
- "CQ Depth 256",
"LRO aggregated",
"LRO flushed",
"LRO no_desc",
};
-
#define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset)
/**
@@ -1120,12 +1119,14 @@ static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset,
/**
* nes_netdev_get_ethtool_stats
*/
+
static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values)
{
u64 u64temp;
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 nic_count;
u32 u32temp;
u32 index = 0;
@@ -1154,6 +1155,46 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
nesvnic->nesdev->port_tx_discards += u32temp;
nesvnic->netstats.tx_dropped += u32temp;
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_short_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_length_errors += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_crc_errors += u32temp;
+ nesvnic->netstats.rx_crc_errors += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->nesdev->mac_tx_errors += u32temp;
+ nesvnic->netstats.tx_errors += u32temp;
+
for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) {
if (nesvnic->qp_nic_index[nic_count] == 0xf)
break;
@@ -1218,11 +1259,15 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames;
+ target_stat_values[++index] = nesvnic->netstats.rx_length_errors;
+ target_stat_values[++index] = nesvnic->nesdev->mac_rx_crc_errors;
+ target_stat_values[++index] = nesvnic->nesdev->port_rx_discards;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames;
target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets;
target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames;
+ target_stat_values[++index] = nesvnic->nesdev->mac_tx_errors;
target_stat_values[++index] = mh_detected;
target_stat_values[++index] = mh_pauses_sent;
target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
@@ -1251,21 +1296,14 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
target_stat_values[++index] = atomic_read(&cm_resets_recvd);
+ target_stat_values[++index] = nesadapter->free_4kpbl;
+ target_stat_values[++index] = nesadapter->free_256pbl;
target_stat_values[++index] = int_mod_timer_init;
- target_stat_values[++index] = int_mod_cq_depth_1;
- target_stat_values[++index] = int_mod_cq_depth_4;
- target_stat_values[++index] = int_mod_cq_depth_16;
- target_stat_values[++index] = int_mod_cq_depth_24;
- target_stat_values[++index] = int_mod_cq_depth_32;
- target_stat_values[++index] = int_mod_cq_depth_128;
- target_stat_values[++index] = int_mod_cq_depth_256;
target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
-
}
-
/**
* nes_netdev_get_drvinfo
*/
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 925e1f2d1d55..9bc2d744b2ea 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -3962,7 +3962,7 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
struct nes_adapter *nesadapter = nesdev->nesadapter;
int i, ret;
- ret = ib_register_device(&nesvnic->nesibdev->ibdev);
+ ret = ib_register_device(&nesvnic->nesibdev->ibdev, NULL);
if (ret) {
return ret;
}
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
new file mode 100644
index 000000000000..7c03a70c55a2
--- /dev/null
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -0,0 +1,7 @@
+config INFINIBAND_QIB
+ tristate "QLogic PCIe HCA support"
+ depends on 64BIT && NET
+ ---help---
+ This is a low-level driver for QLogic PCIe QLE InfiniBand host
+ channel adapters. This driver does not support the QLogic
+ HyperTransport card (model QHT7140).
diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile
new file mode 100644
index 000000000000..c6515a1b9a6a
--- /dev/null
+++ b/drivers/infiniband/hw/qib/Makefile
@@ -0,0 +1,15 @@
+obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o
+
+ib_qib-y := qib_cq.o qib_diag.o qib_dma.o qib_driver.o qib_eeprom.o \
+ qib_file_ops.o qib_fs.o qib_init.o qib_intr.o qib_keys.o \
+ qib_mad.o qib_mmap.o qib_mr.o qib_pcie.o qib_pio_copy.o \
+ qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o qib_srq.o \
+ qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \
+ qib_user_pages.o qib_user_sdma.o qib_verbs_mcast.o qib_iba7220.o \
+ qib_sd7220.o qib_sd7220_img.o qib_iba7322.o qib_verbs.o
+
+# 6120 has no fallback if no MSI interrupts, others can do INTx
+ib_qib-$(CONFIG_PCI_MSI) += qib_iba6120.o
+
+ib_qib-$(CONFIG_X86_64) += qib_wc_x86_64.o
+ib_qib-$(CONFIG_PPC64) += qib_wc_ppc64.o
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
new file mode 100644
index 000000000000..32d9208efcff
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -0,0 +1,1439 @@
+#ifndef _QIB_KERNEL_H
+#define _QIB_KERNEL_H
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This header file is the base header file for qlogic_ib kernel code
+ * qib_user.h serves a similar purpose for user code.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/scatterlist.h>
+#include <linux/io.h>
+#include <linux/fs.h>
+#include <linux/completion.h>
+#include <linux/kref.h>
+#include <linux/sched.h>
+
+#include "qib_common.h"
+#include "qib_verbs.h"
+
+/* only s/w major version of QLogic_IB we can handle */
+#define QIB_CHIP_VERS_MAJ 2U
+
+/* don't care about this except printing */
+#define QIB_CHIP_VERS_MIN 0U
+
+/* The Organization Unique Identifier (Mfg code), and its position in GUID */
+#define QIB_OUI 0x001175
+#define QIB_OUI_LSB 40
+
+/*
+ * per-driver stats, either not device- or port-specific, or
+ * summed over all of the devices and ports.
+ * They are described by name via ipathfs filesystem, so layout
+ * and number of elements can change without breaking compatibility.
+ * If members are added or deleted qib_statnames[] in qib_fs.c must
+ * change to match.
+ */
+struct qlogic_ib_stats {
+ __u64 sps_ints; /* number of interrupts handled */
+ __u64 sps_errints; /* number of error interrupts */
+ __u64 sps_txerrs; /* tx-related packet errors */
+ __u64 sps_rcverrs; /* non-crc rcv packet errors */
+ __u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */
+ __u64 sps_nopiobufs; /* no pio bufs avail from kernel */
+ __u64 sps_ctxts; /* number of contexts currently open */
+ __u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */
+ __u64 sps_buffull;
+ __u64 sps_hdrfull;
+};
+
+extern struct qlogic_ib_stats qib_stats;
+extern struct pci_error_handlers qib_pci_err_handler;
+extern struct pci_driver qib_driver;
+
+#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
+/*
+ * First-cut criterion for "device is active" is
+ * two thousand dwords combined Tx, Rx traffic per
+ * 5-second interval. SMA packets are 64 dwords,
+ * and occur "a few per second", presumably each way.
+ */
+#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)
+
+/*
+ * Struct used to indicate which errors are logged in each of the
+ * error-counters that are logged to EEPROM. A counter is incremented
+ * _once_ (saturating at 255) for each event with any bits set in
+ * the error or hwerror register masks below.
+ */
+#define QIB_EEP_LOG_CNT (4)
+struct qib_eep_log_mask {
+ u64 errs_to_log;
+ u64 hwerrs_to_log;
+};
+
+/*
+ * Below contains all data related to a single context (formerly called port).
+ */
+struct qib_ctxtdata {
+ void **rcvegrbuf;
+ dma_addr_t *rcvegrbuf_phys;
+ /* rcvhdrq base, needs mmap before useful */
+ void *rcvhdrq;
+ /* kernel virtual address where hdrqtail is updated */
+ void *rcvhdrtail_kvaddr;
+ /*
+ * temp buffer for expected send setup, allocated at open, instead
+ * of each setup call
+ */
+ void *tid_pg_list;
+ /*
+ * Shared page for kernel to signal user processes that send buffers
+ * need disarming. The process should call QIB_CMD_DISARM_BUFS
+ * or QIB_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set.
+ */
+ unsigned long *user_event_mask;
+ /* when waiting for rcv or pioavail */
+ wait_queue_head_t wait;
+ /*
+ * rcvegr bufs base, physical; must fit in 44 bits
+ * so that 32 bit programs' mmap64 of 44 bit addresses works
+ */
+ dma_addr_t rcvegr_phys;
+ /* mmap of hdrq, must fit in 44 bits */
+ dma_addr_t rcvhdrq_phys;
+ dma_addr_t rcvhdrqtailaddr_phys;
+
+ /*
+ * number of opens (including slave sub-contexts) on this instance
+ * (ignoring forks, dup, etc. for now)
+ */
+ int cnt;
+ /*
+ * how much space to leave at start of eager TID entries for
+ * protocol use, on each TID
+ */
+ /* instead of calculating it */
+ unsigned ctxt;
+ /* non-zero if ctxt is being shared. */
+ u16 subctxt_cnt;
+ /* non-zero if ctxt is being shared. */
+ u16 subctxt_id;
+ /* number of eager TID entries. */
+ u16 rcvegrcnt;
+ /* index of first eager TID entry. */
+ u16 rcvegr_tid_base;
+ /* number of pio bufs for this ctxt (all procs, if shared) */
+ u32 piocnt;
+ /* first pio buffer for this ctxt */
+ u32 pio_base;
+ /* chip offset of PIO buffers for this ctxt */
+ u32 piobufs;
+ /* how many alloc_pages() chunks in rcvegrbuf_pages */
+ u32 rcvegrbuf_chunks;
+ /* how many egrbufs per chunk */
+ u32 rcvegrbufs_perchunk;
+ /* order for rcvegrbuf_pages */
+ size_t rcvegrbuf_size;
+ /* rcvhdrq size (for freeing) */
+ size_t rcvhdrq_size;
+ /* per-context flags for fileops/intr communication */
+ unsigned long flag;
+ /* next expected TID to check when looking for free */
+ u32 tidcursor;
+ /* WAIT_RCV that timed out, no interrupt */
+ u32 rcvwait_to;
+ /* WAIT_PIO that timed out, no interrupt */
+ u32 piowait_to;
+ /* WAIT_RCV already happened, no wait */
+ u32 rcvnowait;
+ /* WAIT_PIO already happened, no wait */
+ u32 pionowait;
+ /* total number of polled urgent packets */
+ u32 urgent;
+ /* saved total number of polled urgent packets for poll edge trigger */
+ u32 urgent_poll;
+ /* pid of process using this ctxt */
+ pid_t pid;
+ pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
+ /* same size as task_struct .comm[], command that opened context */
+ char comm[16];
+ /* pkeys set by this use of this ctxt */
+ u16 pkeys[4];
+ /* so file ops can get at unit */
+ struct qib_devdata *dd;
+ /* so funcs that need physical port can get it easily */
+ struct qib_pportdata *ppd;
+ /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
+ void *subctxt_uregbase;
+ /* An array of pages for the eager receive buffers * N */
+ void *subctxt_rcvegrbuf;
+ /* An array of pages for the eager header queue entries * N */
+ void *subctxt_rcvhdr_base;
+ /* The version of the library which opened this ctxt */
+ u32 userversion;
+ /* Bitmask of active slaves */
+ u32 active_slaves;
+ /* Type of packets or conditions we want to poll for */
+ u16 poll_type;
+ /* receive packet sequence counter */
+ u8 seq_cnt;
+ u8 redirect_seq_cnt;
+ /* ctxt rcvhdrq head offset */
+ u32 head;
+ u32 pkt_count;
+ /* QPs waiting for context processing */
+ struct list_head qp_wait_list;
+};
+
+struct qib_sge_state;
+
+struct qib_sdma_txreq {
+ int flags;
+ int sg_count;
+ dma_addr_t addr;
+ void (*callback)(struct qib_sdma_txreq *, int);
+ u16 start_idx; /* sdma private */
+ u16 next_descq_idx; /* sdma private */
+ struct list_head list; /* sdma private */
+};
+
+struct qib_sdma_desc {
+ __le64 qw[2];
+};
+
+struct qib_verbs_txreq {
+ struct qib_sdma_txreq txreq;
+ struct qib_qp *qp;
+ struct qib_swqe *wqe;
+ u32 dwords;
+ u16 hdr_dwords;
+ u16 hdr_inx;
+ struct qib_pio_header *align_buf;
+ struct qib_mregion *mr;
+ struct qib_sge_state *ss;
+};
+
+#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
+#define QIB_SDMA_TXREQ_F_HEADTOHOST 0x2
+#define QIB_SDMA_TXREQ_F_INTREQ 0x4
+#define QIB_SDMA_TXREQ_F_FREEBUF 0x8
+#define QIB_SDMA_TXREQ_F_FREEDESC 0x10
+
+#define QIB_SDMA_TXREQ_S_OK 0
+#define QIB_SDMA_TXREQ_S_SENDERROR 1
+#define QIB_SDMA_TXREQ_S_ABORTED 2
+#define QIB_SDMA_TXREQ_S_SHUTDOWN 3
+
+/*
+ * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
+ * Mostly for MADs that set or query link parameters, also ipath
+ * config interfaces
+ */
+#define QIB_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
+#define QIB_IB_CFG_LWID_ENB 2 /* allowed Link-width */
+#define QIB_IB_CFG_LWID 3 /* currently active Link-width */
+#define QIB_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
+#define QIB_IB_CFG_SPD 5 /* current Link spd */
+#define QIB_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
+#define QIB_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
+#define QIB_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
+#define QIB_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
+#define QIB_IB_CFG_OP_VLS 10 /* operational VLs */
+#define QIB_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
+#define QIB_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
+#define QIB_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
+#define QIB_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
+#define QIB_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
+#define QIB_IB_CFG_PKEYS 16 /* update partition keys */
+#define QIB_IB_CFG_MTU 17 /* update MTU in IBC */
+#define QIB_IB_CFG_LSTATE 18 /* update linkcmd and linkinitcmd in IBC */
+#define QIB_IB_CFG_VL_HIGH_LIMIT 19
+#define QIB_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
+#define QIB_IB_CFG_PORT 21 /* switch port we are connected to */
+
+/*
+ * for CFG_LSTATE: LINKCMD in upper 16 bits, LINKINITCMD in lower 16
+ * IB_LINKINITCMD_POLL and SLEEP are also used as set/get values for
+ * QIB_IB_CFG_LINKDEFAULT cmd
+ */
+#define IB_LINKCMD_DOWN (0 << 16)
+#define IB_LINKCMD_ARMED (1 << 16)
+#define IB_LINKCMD_ACTIVE (2 << 16)
+#define IB_LINKINITCMD_NOP 0
+#define IB_LINKINITCMD_POLL 1
+#define IB_LINKINITCMD_SLEEP 2
+#define IB_LINKINITCMD_DISABLE 3
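+
+/*
+ * Example (illustrative only): per the layout above, a combined value for
+ * QIB_IB_CFG_LSTATE carries LINKCMD in the upper 16 bits and LINKINITCMD
+ * in the lower 16, e.g.
+ *
+ * dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ * IB_LINKCMD_ARMED | IB_LINKINITCMD_POLL);
+ *
+ * using the per-device f_set_ib_cfg hook declared in qib_devdata below.
+ */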
+
+/*
+ * valid states passed to qib_set_linkstate() user call
+ */
+#define QIB_IB_LINKDOWN 0
+#define QIB_IB_LINKARM 1
+#define QIB_IB_LINKACTIVE 2
+#define QIB_IB_LINKDOWN_ONLY 3
+#define QIB_IB_LINKDOWN_SLEEP 4
+#define QIB_IB_LINKDOWN_DISABLE 5
+
+/*
+ * These 7 values (SDR, DDR, and QDR may be ORed for auto-speed
+ * negotiation) are used for the 3rd argument to path_f_set_ib_cfg
+ * with cmd QIB_IB_CFG_SPD_ENB, by direct calls or via sysfs. They
+ * are also the possible values for qib_link_speed_enabled and active.
+ * The values were chosen to match values used within the IB spec.
+ */
+#define QIB_IB_SDR 1
+#define QIB_IB_DDR 2
+#define QIB_IB_QDR 4
+
+#define QIB_DEFAULT_MTU 4096
+
+/*
+ * Possible IB config parameters for f_get/set_ib_table()
+ */
+#define QIB_IB_TBL_VL_HIGH_ARB 1 /* Get/set VL high priority weights */
+#define QIB_IB_TBL_VL_LOW_ARB 2 /* Get/set VL low priority weights */
+
+/*
+ * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
+ * these are bits so they can be combined, e.g.
+ * QIB_RCVCTRL_INTRAVAIL_ENB | QIB_RCVCTRL_CTXT_ENB
+ */
+#define QIB_RCVCTRL_TAILUPD_ENB 0x01
+#define QIB_RCVCTRL_TAILUPD_DIS 0x02
+#define QIB_RCVCTRL_CTXT_ENB 0x04
+#define QIB_RCVCTRL_CTXT_DIS 0x08
+#define QIB_RCVCTRL_INTRAVAIL_ENB 0x10
+#define QIB_RCVCTRL_INTRAVAIL_DIS 0x20
+#define QIB_RCVCTRL_PKEY_ENB 0x40 /* Note, default is enabled */
+#define QIB_RCVCTRL_PKEY_DIS 0x80
+#define QIB_RCVCTRL_BP_ENB 0x0100
+#define QIB_RCVCTRL_BP_DIS 0x0200
+#define QIB_RCVCTRL_TIDFLOW_ENB 0x0400
+#define QIB_RCVCTRL_TIDFLOW_DIS 0x0800
+
+/*
+ * Possible "operations" for f_sendctrl(ppd, op, var)
+ * these are bits so they can be combined, e.g.
+ * QIB_SENDCTRL_BUFAVAIL_ENB | QIB_SENDCTRL_ENB
+ * Some operations (e.g. DISARM, ABORT) are known to
+ * be "one-shot", so do not modify shadow.
+ */
+#define QIB_SENDCTRL_DISARM (0x1000)
+#define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM)
+ /* available (0x2000) */
+#define QIB_SENDCTRL_AVAIL_DIS (0x4000)
+#define QIB_SENDCTRL_AVAIL_ENB (0x8000)
+#define QIB_SENDCTRL_AVAIL_BLIP (0x10000)
+#define QIB_SENDCTRL_SEND_DIS (0x20000)
+#define QIB_SENDCTRL_SEND_ENB (0x40000)
+#define QIB_SENDCTRL_FLUSH (0x80000)
+#define QIB_SENDCTRL_CLEAR (0x100000)
+#define QIB_SENDCTRL_DISARM_ALL (0x200000)
+
+/*
+ * These are the generic indices for requesting per-port
+ * counter values via the f_portcntr function. They
+ * are always returned as 64 bit values, although most
+ * are 32 bit counters.
+ */
+/* send-related counters */
+#define QIBPORTCNTR_PKTSEND 0U
+#define QIBPORTCNTR_WORDSEND 1U
+#define QIBPORTCNTR_PSXMITDATA 2U
+#define QIBPORTCNTR_PSXMITPKTS 3U
+#define QIBPORTCNTR_PSXMITWAIT 4U
+#define QIBPORTCNTR_SENDSTALL 5U
+/* receive-related counters */
+#define QIBPORTCNTR_PKTRCV 6U
+#define QIBPORTCNTR_PSRCVDATA 7U
+#define QIBPORTCNTR_PSRCVPKTS 8U
+#define QIBPORTCNTR_RCVEBP 9U
+#define QIBPORTCNTR_RCVOVFL 10U
+#define QIBPORTCNTR_WORDRCV 11U
+/* IB link related error counters */
+#define QIBPORTCNTR_RXLOCALPHYERR 12U
+#define QIBPORTCNTR_RXVLERR 13U
+#define QIBPORTCNTR_ERRICRC 14U
+#define QIBPORTCNTR_ERRVCRC 15U
+#define QIBPORTCNTR_ERRLPCRC 16U
+#define QIBPORTCNTR_BADFORMAT 17U
+#define QIBPORTCNTR_ERR_RLEN 18U
+#define QIBPORTCNTR_IBSYMBOLERR 19U
+#define QIBPORTCNTR_INVALIDRLEN 20U
+#define QIBPORTCNTR_UNSUPVL 21U
+#define QIBPORTCNTR_EXCESSBUFOVFL 22U
+#define QIBPORTCNTR_ERRLINK 23U
+#define QIBPORTCNTR_IBLINKDOWN 24U
+#define QIBPORTCNTR_IBLINKERRRECOV 25U
+#define QIBPORTCNTR_LLI 26U
+/* other error counters */
+#define QIBPORTCNTR_RXDROPPKT 27U
+#define QIBPORTCNTR_VL15PKTDROP 28U
+#define QIBPORTCNTR_ERRPKEY 29U
+#define QIBPORTCNTR_KHDROVFL 30U
+/* sampling counters (these are actually control registers) */
+#define QIBPORTCNTR_PSINTERVAL 31U
+#define QIBPORTCNTR_PSSTART 32U
+#define QIBPORTCNTR_PSSTAT 33U
+
+/* how often we check for packet activity for "power on hours" (in seconds) */
+#define ACTIVITY_TIMER 5
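+
+/*
+ * Simplified sketch (assumed, for illustration) of how ACTIVITY_TIMER and
+ * QIB_TRAFFIC_ACTIVE_THRESHOLD combine for power-on-hours accounting:
+ * roughly every ACTIVITY_TIMER seconds the accumulated Tx+Rx dword count
+ * (traffic_wds below) is compared against the threshold, and only "active"
+ * intervals are credited to active_time / eep_hrs, e.g.
+ *
+ * if (dd->traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+ * atomic_add(ACTIVITY_TIMER, &dd->active_time);
+ * dd->traffic_wds = 0;
+ */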
+
+/* Below is an opaque struct. Each chip (device) can maintain
+ * private data needed for its operation, but not germane to the
+ * rest of the driver. For convenience, we define another that
+ * is chip-specific, per-port
+ */
+struct qib_chip_specific;
+struct qib_chippport_specific;
+
+enum qib_sdma_states {
+ qib_sdma_state_s00_hw_down,
+ qib_sdma_state_s10_hw_start_up_wait,
+ qib_sdma_state_s20_idle,
+ qib_sdma_state_s30_sw_clean_up_wait,
+ qib_sdma_state_s40_hw_clean_up_wait,
+ qib_sdma_state_s50_hw_halt_wait,
+ qib_sdma_state_s99_running,
+};
+
+enum qib_sdma_events {
+ qib_sdma_event_e00_go_hw_down,
+ qib_sdma_event_e10_go_hw_start,
+ qib_sdma_event_e20_hw_started,
+ qib_sdma_event_e30_go_running,
+ qib_sdma_event_e40_sw_cleaned,
+ qib_sdma_event_e50_hw_cleaned,
+ qib_sdma_event_e60_hw_halted,
+ qib_sdma_event_e70_go_idle,
+ qib_sdma_event_e7220_err_halted,
+ qib_sdma_event_e7322_err_halted,
+ qib_sdma_event_e90_timer_tick,
+};
+
+extern char *qib_sdma_state_names[];
+extern char *qib_sdma_event_names[];
+
+struct sdma_set_state_action {
+ unsigned op_enable:1;
+ unsigned op_intenable:1;
+ unsigned op_halt:1;
+ unsigned op_drain:1;
+ unsigned go_s99_running_tofalse:1;
+ unsigned go_s99_running_totrue:1;
+};
+
+struct qib_sdma_state {
+ struct kref kref;
+ struct completion comp;
+ enum qib_sdma_states current_state;
+ struct sdma_set_state_action *set_state_action;
+ unsigned current_op;
+ unsigned go_s99_running;
+ unsigned first_sendbuf;
+ unsigned last_sendbuf; /* really last +1 */
+ /* debugging/devel */
+ enum qib_sdma_states previous_state;
+ unsigned previous_op;
+ enum qib_sdma_events last_event;
+};
+
+struct xmit_wait {
+ struct timer_list timer;
+ u64 counter;
+ u8 flags;
+ struct cache {
+ u64 psxmitdata;
+ u64 psrcvdata;
+ u64 psxmitpkts;
+ u64 psrcvpkts;
+ u64 psxmitwait;
+ } counter_cache;
+};
+
+/*
+ * The structure below encapsulates data relevant to a physical IB Port.
+ * Current chips support only one such port, but the separation
+ * clarifies things a bit. Note that to conform to IB conventions,
+ * port-numbers are one-based. The first or only port is port1.
+ */
+struct qib_pportdata {
+ struct qib_ibport ibport_data;
+
+ struct qib_devdata *dd;
+ struct qib_chippport_specific *cpspec; /* chip-specific per-port */
+ struct kobject pport_kobj;
+ struct kobject sl2vl_kobj;
+ struct kobject diagc_kobj;
+
+ /* GUID for this interface, in network order */
+ __be64 guid;
+
+ /* QIB_POLL, etc. link-state specific flags, per port */
+ u32 lflags;
+ /* qib_lflags driver is waiting for */
+ u32 state_wanted;
+ spinlock_t lflags_lock;
+ /* number of (port-specific) interrupts for this port -- saturates... */
+ u32 int_counter;
+
+ /* ref count for each pkey */
+ atomic_t pkeyrefs[4];
+
+ /*
+ * this address is mapped readonly into user processes so they can
+ * get status cheaply, whenever they want. One qword of status per port
+ */
+ u64 *statusp;
+
+ /* SendDMA related entries */
+ spinlock_t sdma_lock;
+ struct qib_sdma_state sdma_state;
+ unsigned long sdma_buf_jiffies;
+ struct qib_sdma_desc *sdma_descq;
+ u64 sdma_descq_added;
+ u64 sdma_descq_removed;
+ u16 sdma_descq_cnt;
+ u16 sdma_descq_tail;
+ u16 sdma_descq_head;
+ u16 sdma_next_intr;
+ u16 sdma_reset_wait;
+ u8 sdma_generation;
+ struct tasklet_struct sdma_sw_clean_up_task;
+ struct list_head sdma_activelist;
+
+ dma_addr_t sdma_descq_phys;
+ volatile __le64 *sdma_head_dma; /* DMA'ed by chip */
+ dma_addr_t sdma_head_phys;
+
+ wait_queue_head_t state_wait; /* for state_wanted */
+
+ /* HoL blocking for SMP replies */
+ unsigned hol_state;
+ struct timer_list hol_timer;
+
+ /*
+ * Shadow copies of registers; size indicates read access size.
+ * Most of them are readonly, but some are write-only registers,
+ * where we manipulate the bits in the shadow copy, and then write
+ * the shadow copy to qlogic_ib.
+ *
+ * We deliberately make most of these 32 bits, since they have
+ * restricted range. For any that we read, we want to generate 32
+ * bit accesses, since Opteron will generate 2 separate 32 bit HT
+ * transactions for a 64 bit read, and we want to avoid unnecessary
+ * bus transactions.
+ */
+
+ /* This is the 64 bit group */
+ /* last ibcstatus. opaque outside chip-specific code */
+ u64 lastibcstat;
+
+ /* these are the "32 bit" regs */
+
+ /*
+ * the following two are 32-bit bitmasks, but {test,clear,set}_bit
+ * all expect bit fields to be "unsigned long"
+ */
+ unsigned long p_rcvctrl; /* shadow per-port rcvctrl */
+ unsigned long p_sendctrl; /* shadow per-port sendctrl */
+
+ u32 ibmtu; /* The MTU programmed for this unit */
+ /*
+ * Current max size IB packet (in bytes) including IB headers, that
+ * we can send. Changes when ibmtu changes.
+ */
+ u32 ibmaxlen;
+ /*
+ * ibmaxlen at init time, limited by chip and by receive buffer
+ * size. Not changed after init.
+ */
+ u32 init_ibmaxlen;
+ /* LID programmed for this instance */
+ u16 lid;
+ /* list of pkeys programmed; 0 if not set */
+ u16 pkeys[4];
+ /* LID mask control */
+ u8 lmc;
+ u8 link_width_supported;
+ u8 link_speed_supported;
+ u8 link_width_enabled;
+ u8 link_speed_enabled;
+ u8 link_width_active;
+ u8 link_speed_active;
+ u8 vls_supported;
+ u8 vls_operational;
+ /* Rx Polarity inversion (compensate for ~tx on partner) */
+ u8 rx_pol_inv;
+
+ u8 hw_pidx; /* physical port index */
+ u8 port; /* IB port number and index into dd->pports - 1 */
+
+ u8 delay_mult;
+
+ /* used to override LED behavior */
+ u8 led_override; /* Substituted for normal value, if non-zero */
+ u16 led_override_timeoff; /* delta to next timer event */
+ u8 led_override_vals[2]; /* Alternates per blink-frame */
+ u8 led_override_phase; /* Just counts, LSB picks from vals[] */
+ atomic_t led_override_timer_active;
+ /* Used to flash LEDs in override mode */
+ struct timer_list led_override_timer;
+ struct xmit_wait cong_stats;
+ struct timer_list symerr_clear_timer;
+};
+
+/* Observers. Not to be taken lightly, possibly not to ship. */
+/*
+ * If a diag read or write is to (bottom <= offset <= top),
+ * the "hoook" is called, allowing, e.g. shadows to be
+ * updated in sync with the driver. struct diag_observer
+ * is the "visible" part.
+ */
+struct diag_observer;
+
+typedef int (*diag_hook) (struct qib_devdata *dd,
+ const struct diag_observer *op,
+ u32 offs, u64 *data, u64 mask, int only_32);
+
+struct diag_observer {
+ diag_hook hook;
+ u32 bottom;
+ u32 top;
+};
+
+extern int qib_register_observer(struct qib_devdata *dd,
+ const struct diag_observer *op);
+
+/* Only declared here, not defined. Private to diags */
+struct diag_observer_list_elt;
+
+/* device data struct now contains only "general per-device" info.
+ * Fields related to a physical IB port are in a qib_pportdata struct
+ * (described above), while fields only used by a particular chip-type are in
+ * a qib_chipdata struct, whose contents are opaque to this file.
+ */
+struct qib_devdata {
+ struct qib_ibdev verbs_dev; /* must be first */
+ struct list_head list;
+ /* pointers to related structs for this device */
+ /* pci access data structure */
+ struct pci_dev *pcidev;
+ struct cdev *user_cdev;
+ struct cdev *diag_cdev;
+ struct device *user_device;
+ struct device *diag_device;
+
+ /* mem-mapped pointer to base of chip regs */
+ u64 __iomem *kregbase;
+ /* end of mem-mapped chip space excluding sendbuf and user regs */
+ u64 __iomem *kregend;
+ /* physical address of chip for io_remap, etc. */
+ resource_size_t physaddr;
+ /* qib_cfgctxts pointers */
+ struct qib_ctxtdata **rcd; /* Receive Context Data */
+
+ /* qib_pportdata, points to array of (physical) port-specific
+ * data structs, indexed by pidx (0..n-1)
+ */
+ struct qib_pportdata *pport;
+ struct qib_chip_specific *cspec; /* chip-specific */
+
+ /* kvirt address of 1st 2k pio buffer */
+ void __iomem *pio2kbase;
+ /* kvirt address of 1st 4k pio buffer */
+ void __iomem *pio4kbase;
+ /* mem-mapped pointer to base of PIO buffers (if using WC PAT) */
+ void __iomem *piobase;
+ /* mem-mapped pointer to base of user chip regs (if using WC PAT) */
+ u64 __iomem *userbase;
+ /*
+ * points to area where PIOavail registers will be DMA'ed.
+ * Has to be on a page of its own, because the page will be
+ * mapped into user program space. This copy is *ONLY* ever
+ * written by DMA, not by the driver! Need a copy per device
+ * when we get to multiple devices
+ */
+ volatile __le64 *pioavailregs_dma; /* DMA'ed by chip */
+ /* physical address where updates occur */
+ dma_addr_t pioavailregs_phys;
+
+ /* device-specific implementations of functions needed by
+ * common code. Contrary to previous consensus, we can't
+ * really just point to a device-specific table, because we
+ * may need to "bend", e.g. *_f_put_tid
+ */
+ /* fallback to alternate interrupt type if possible */
+ int (*f_intr_fallback)(struct qib_devdata *);
+ /* hard reset chip */
+ int (*f_reset)(struct qib_devdata *);
+ void (*f_quiet_serdes)(struct qib_pportdata *);
+ int (*f_bringup_serdes)(struct qib_pportdata *);
+ int (*f_early_init)(struct qib_devdata *);
+ void (*f_clear_tids)(struct qib_devdata *, struct qib_ctxtdata *);
+ void (*f_put_tid)(struct qib_devdata *, u64 __iomem*,
+ u32, unsigned long);
+ void (*f_cleanup)(struct qib_devdata *);
+ void (*f_setextled)(struct qib_pportdata *, u32);
+ /* fill out chip-specific fields */
+ int (*f_get_base_info)(struct qib_ctxtdata *, struct qib_base_info *);
+ /* free irq */
+ void (*f_free_irq)(struct qib_devdata *);
+ struct qib_message_header *(*f_get_msgheader)
+ (struct qib_devdata *, __le32 *);
+ void (*f_config_ctxts)(struct qib_devdata *);
+ int (*f_get_ib_cfg)(struct qib_pportdata *, int);
+ int (*f_set_ib_cfg)(struct qib_pportdata *, int, u32);
+ int (*f_set_ib_loopback)(struct qib_pportdata *, const char *);
+ int (*f_get_ib_table)(struct qib_pportdata *, int, void *);
+ int (*f_set_ib_table)(struct qib_pportdata *, int, void *);
+ u32 (*f_iblink_state)(u64);
+ u8 (*f_ibphys_portstate)(u64);
+ void (*f_xgxs_reset)(struct qib_pportdata *);
+ /* per chip actions needed for IB Link up/down changes */
+ int (*f_ib_updown)(struct qib_pportdata *, int, u64);
+ u32 __iomem *(*f_getsendbuf)(struct qib_pportdata *, u64, u32 *);
+ /* Read/modify/write of GPIO pins (potentially chip-specific) */
+ int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir,
+ u32 mask);
+ /* Enable writes to config EEPROM (if supported) */
+ int (*f_eeprom_wen)(struct qib_devdata *dd, int wen);
+ /*
+ * modify rcvctrl shadow[s] and write to appropriate chip-regs.
+ * see above QIB_RCVCTRL_xxx_ENB/DIS for operations.
+ * (ctxt == -1) means "all contexts", only meaningful for
+ * clearing. Could remove if chip_spec shutdown properly done.
+ */
+ void (*f_rcvctrl)(struct qib_pportdata *, unsigned int op,
+ int ctxt);
+ /* Read/modify/write sendctrl appropriately for op and port. */
+ void (*f_sendctrl)(struct qib_pportdata *, u32 op);
+ void (*f_set_intr_state)(struct qib_devdata *, u32);
+ void (*f_set_armlaunch)(struct qib_devdata *, u32);
+ void (*f_wantpiobuf_intr)(struct qib_devdata *, u32);
+ int (*f_late_initreg)(struct qib_devdata *);
+ int (*f_init_sdma_regs)(struct qib_pportdata *);
+ u16 (*f_sdma_gethead)(struct qib_pportdata *);
+ int (*f_sdma_busy)(struct qib_pportdata *);
+ void (*f_sdma_update_tail)(struct qib_pportdata *, u16);
+ void (*f_sdma_set_desc_cnt)(struct qib_pportdata *, unsigned);
+ void (*f_sdma_sendctrl)(struct qib_pportdata *, unsigned);
+ void (*f_sdma_hw_clean_up)(struct qib_pportdata *);
+ void (*f_sdma_hw_start_up)(struct qib_pportdata *);
+ void (*f_sdma_init_early)(struct qib_pportdata *);
+ void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
+ void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32);
+ u32 (*f_hdrqempty)(struct qib_ctxtdata *);
+ u64 (*f_portcntr)(struct qib_pportdata *, u32);
+ u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
+ u64 **);
+ u32 (*f_read_portcntrs)(struct qib_devdata *, loff_t, u32,
+ char **, u64 **);
+ u32 (*f_setpbc_control)(struct qib_pportdata *, u32, u8, u8);
+ void (*f_initvl15_bufs)(struct qib_devdata *);
+ void (*f_init_ctxt)(struct qib_ctxtdata *);
+ void (*f_txchk_change)(struct qib_devdata *, u32, u32, u32,
+ struct qib_ctxtdata *);
+ void (*f_writescratch)(struct qib_devdata *, u32);
+ int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
+
+ char *boardname; /* human readable board info */
+
+ /* template for writing TIDs */
+ u64 tidtemplate;
+ /* value to write to free TIDs */
+ u64 tidinvalid;
+
+ /* number of registers used for pioavail */
+ u32 pioavregs;
+ /* device (not port) flags, basically device capabilities */
+ u32 flags;
+ /* last buffer for user use */
+ u32 lastctxt_piobuf;
+
+ /* saturating counter of (non-port-specific) device interrupts */
+ u32 int_counter;
+
+ /* pio bufs allocated per ctxt */
+ u32 pbufsctxt;
+ /* if remainder on bufs/ctxt, ctxts < extrabuf get 1 extra */
+ u32 ctxts_extrabuf;
+ /*
+ * number of ctxts configured as max; zero means use the number the chip
+ * supports; fewer gives more pio bufs/ctxt, etc.
+ */
+ u32 cfgctxts;
+
+ /*
+ * hint that we should update pioavailshadow before
+ * looking for a PIO buffer
+ */
+ u32 upd_pio_shadow;
+
+ /* internal debugging stats */
+ u32 maxpkts_call;
+ u32 avgpkts_call;
+ u64 nopiobufs;
+
+ /* PCI Vendor ID (here for NodeInfo) */
+ u16 vendorid;
+ /* PCI Device ID (here for NodeInfo) */
+ u16 deviceid;
+ /* for write combining settings */
+ unsigned long wc_cookie;
+ unsigned long wc_base;
+ unsigned long wc_len;
+
+ /* shadow copy of struct page *'s for exp tid pages */
+ struct page **pageshadow;
+ /* shadow copy of dma handles for exp tid pages */
+ dma_addr_t *physshadow;
+ u64 __iomem *egrtidbase;
+ spinlock_t sendctrl_lock; /* protect changes to sendctrl shadow */
+ /* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */
+ spinlock_t uctxt_lock; /* rcd and user context changes */
+ /*
+ * per unit status, see also portdata statusp
+ * mapped readonly into user processes so they can get unit and
+ * IB link status cheaply
+ */
+ u64 *devstatusp;
+ char *freezemsg; /* freeze msg if hw error put chip in freeze */
+ u32 freezelen; /* max length of freezemsg */
+ /* timer used to prevent stats overflow, error throttling, etc. */
+ struct timer_list stats_timer;
+
+ /* timer to verify interrupts work, and fallback if possible */
+ struct timer_list intrchk_timer;
+ unsigned long ureg_align; /* user register alignment */
+
+ /*
+ * Protects pioavailshadow, pioavailkernel, pio_need_disarm, and
+ * pio_writing.
+ */
+ spinlock_t pioavail_lock;
+
+ /*
+ * Shadow copies of registers; size indicates read access size.
+ * Most of them are readonly, but some are write-only registers,
+ * where we manipulate the bits in the shadow copy, and then write
+ * the shadow copy to qlogic_ib.
+ *
+ * We deliberately make most of these 32 bits, since they have
+ * restricted range. For any that we read, we want to generate 32
+ * bit accesses, since Opteron will generate 2 separate 32 bit HT
+ * transactions for a 64 bit read, and we want to avoid unnecessary
+ * bus transactions.
+ */
+
+ /* This is the 64 bit group */
+
+ unsigned long pioavailshadow[6];
+ /* bitmap of send buffers available for the kernel to use with PIO. */
+ unsigned long pioavailkernel[6];
+ /* bitmap of send buffers which need to be disarmed. */
+ unsigned long pio_need_disarm[3];
+ /* bitmap of send buffers which are being written to. */
+ unsigned long pio_writing[3];
+ /* kr_revision shadow */
+ u64 revision;
+ /* Base GUID for device (from eeprom, network order) */
+ __be64 base_guid;
+
+ /*
+ * kr_sendpiobufbase value (chip offset of pio buffers), and the
+ * base of the 2KB buffers (user processes only use 2K)
+ */
+ u64 piobufbase;
+ u32 pio2k_bufbase;
+
+ /* these are the "32 bit" regs */
+
+ /* number of GUIDs in the flash for this interface */
+ u32 nguid;
+ /*
+ * the following two are 32-bit bitmasks, but {test,clear,set}_bit
+ * all expect bit fields to be "unsigned long"
+ */
+ unsigned long rcvctrl; /* shadow per device rcvctrl */
+ unsigned long sendctrl; /* shadow per device sendctrl */
+
+ /* value we put in kr_rcvhdrcnt */
+ u32 rcvhdrcnt;
+ /* value we put in kr_rcvhdrsize */
+ u32 rcvhdrsize;
+ /* value we put in kr_rcvhdrentsize */
+ u32 rcvhdrentsize;
+ /* kr_ctxtcnt value */
+ u32 ctxtcnt;
+ /* kr_pagealign value */
+ u32 palign;
+ /* number of "2KB" PIO buffers */
+ u32 piobcnt2k;
+ /* size in bytes of "2KB" PIO buffers */
+ u32 piosize2k;
+ /* max usable size in dwords of a "2KB" PIO buffer before going "4KB" */
+ u32 piosize2kmax_dwords;
+ /* number of "4KB" PIO buffers */
+ u32 piobcnt4k;
+ /* size in bytes of "4KB" PIO buffers */
+ u32 piosize4k;
+ /* kr_rcvegrbase value */
+ u32 rcvegrbase;
+ /* kr_rcvtidbase value */
+ u32 rcvtidbase;
+ /* kr_rcvtidcnt value */
+ u32 rcvtidcnt;
+ /* kr_userregbase */
+ u32 uregbase;
+ /* shadow the control register contents */
+ u32 control;
+
+ /* chip address space used by 4k pio buffers */
+ u32 align4k;
+ /* size of each rcvegrbuffer */
+ u32 rcvegrbufsize;
+ /* localbus width (1, 2, 4, 8, 16, 32) from config space */
+ u32 lbus_width;
+ /* localbus speed in MHz */
+ u32 lbus_speed;
+ int unit; /* unit # of this chip */
+
+ /* start of CHIP_SPEC move to chipspec, but need code changes */
+ /* low and high portions of MSI capability/vector */
+ u32 msi_lo;
+ /* saved after PCIe init for restore after reset */
+ u32 msi_hi;
+ /* MSI data (vector) saved for restore */
+ u16 msi_data;
+ /* so we can rewrite it after a chip reset */
+ u32 pcibar0;
+ /* so we can rewrite it after a chip reset */
+ u32 pcibar1;
+ u64 rhdrhead_intr_off;
+
+ /*
+ * ASCII serial number, from flash, large enough for original
+ * all digit strings, and longer QLogic serial number format
+ */
+ u8 serial[16];
+ /* human readable board version */
+ u8 boardversion[96];
+ u8 lbus_info[32]; /* human readable localbus info */
+ /* chip major rev, from qib_revision */
+ u8 majrev;
+ /* chip minor rev, from qib_revision */
+ u8 minrev;
+
+ /* Misc small ints */
+ /* Number of physical ports available */
+ u8 num_pports;
+ /* Lowest context number which can be used by user processes */
+ u8 first_user_ctxt;
+ u8 n_krcv_queues;
+ u8 qpn_mask;
+ u8 skip_kctxt_mask;
+
+ u16 rhf_offset; /* offset of RHF within receive header entry */
+
+ /*
+ * GPIO pins for twsi-connected devices, and device code for eeprom
+ */
+ u8 gpio_sda_num;
+ u8 gpio_scl_num;
+ u8 twsi_eeprom_dev;
+ u8 board_atten;
+
+ /* Support (including locks) for EEPROM logging of errors and time */
+ /* control access to actual counters, timer */
+ spinlock_t eep_st_lock;
+ /* control high-level access to EEPROM */
+ struct mutex eep_lock;
+ uint64_t traffic_wds;
+ /* active time is kept in seconds, but logged in hours */
+ atomic_t active_time;
+ /* Below are nominal shadow of EEPROM, new since last EEPROM update */
+ uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
+ uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
+ uint16_t eep_hrs;
+ /*
+ * masks for which bits of errs, hwerrs that cause
+ * each of the counters to increment.
+ */
+ struct qib_eep_log_mask eep_st_masks[QIB_EEP_LOG_CNT];
+ struct qib_diag_client *diag_client;
+ spinlock_t qib_diag_trans_lock; /* protect diag observer ops */
+ struct diag_observer_list_elt *diag_observer_list;
+
+ u8 psxmitwait_supported;
+ /* cycle length of PS* counters in HW (in picoseconds) */
+ u16 psxmitwait_check_rate;
+};
+
+/* hol_state values */
+#define QIB_HOL_UP 0
+#define QIB_HOL_INIT 1
+
+#define QIB_SDMA_SENDCTRL_OP_ENABLE (1U << 0)
+#define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
+#define QIB_SDMA_SENDCTRL_OP_HALT (1U << 2)
+#define QIB_SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
+#define QIB_SDMA_SENDCTRL_OP_DRAIN (1U << 4)
+
+/* operation types for f_txchk_change() */
+#define TXCHK_CHG_TYPE_DIS1 3
+#define TXCHK_CHG_TYPE_ENAB1 2
+#define TXCHK_CHG_TYPE_KERN 1
+#define TXCHK_CHG_TYPE_USER 0
+
+#define QIB_CHASE_TIME msecs_to_jiffies(145)
+#define QIB_CHASE_DIS_TIME msecs_to_jiffies(160)
+
+/* Private data for file operations */
+struct qib_filedata {
+ struct qib_ctxtdata *rcd;
+ unsigned subctxt;
+ unsigned tidcursor;
+ struct qib_user_sdma_queue *pq;
+ int rec_cpu_num; /* for cpu affinity; -1 if none */
+};
+
+extern struct list_head qib_dev_list;
+extern spinlock_t qib_devs_lock;
+extern struct qib_devdata *qib_lookup(int unit);
+extern u32 qib_cpulist_count;
+extern unsigned long *qib_cpulist;
+
+extern unsigned qib_wc_pat;
+int qib_init(struct qib_devdata *, int);
+int init_chip_wc_pat(struct qib_devdata *dd, u32);
+int qib_enable_wc(struct qib_devdata *dd);
+void qib_disable_wc(struct qib_devdata *dd);
+int qib_count_units(int *npresentp, int *nupp);
+int qib_count_active_units(void);
+
+int qib_cdev_init(int minor, const char *name,
+ const struct file_operations *fops,
+ struct cdev **cdevp, struct device **devp);
+void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp);
+int qib_dev_init(void);
+void qib_dev_cleanup(void);
+
+int qib_diag_add(struct qib_devdata *);
+void qib_diag_remove(struct qib_devdata *);
+void qib_handle_e_ibstatuschanged(struct qib_pportdata *, u64);
+void qib_sdma_update_tail(struct qib_pportdata *, u16); /* hold sdma_lock */
+
+int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err);
+void qib_bad_intrstatus(struct qib_devdata *);
+void qib_handle_urcv(struct qib_devdata *, u64);
+
+/* clean up any per-chip chip-specific stuff */
+void qib_chip_cleanup(struct qib_devdata *);
+/* clean up any chip type-specific stuff */
+void qib_chip_done(void);
+
+/* check to see if we have to force ordering for write combining */
+int qib_unordered_wc(void);
+void qib_pio_copy(void __iomem *to, const void *from, size_t count);
+
+void qib_disarm_piobufs(struct qib_devdata *, unsigned, unsigned);
+int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *);
+void qib_disarm_piobufs_set(struct qib_devdata *, unsigned long *, unsigned);
+void qib_cancel_sends(struct qib_pportdata *);
+
+int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *);
+int qib_setup_eagerbufs(struct qib_ctxtdata *);
+void qib_set_ctxtcnt(struct qib_devdata *);
+int qib_create_ctxts(struct qib_devdata *dd);
+struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32);
+void qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
+void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
+
+u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
+int qib_reset_device(int);
+int qib_wait_linkstate(struct qib_pportdata *, u32, int);
+int qib_set_linkstate(struct qib_pportdata *, u8);
+int qib_set_mtu(struct qib_pportdata *, u16);
+int qib_set_lid(struct qib_pportdata *, u32, u8);
+void qib_hol_down(struct qib_pportdata *);
+void qib_hol_init(struct qib_pportdata *);
+void qib_hol_up(struct qib_pportdata *);
+void qib_hol_event(unsigned long);
+void qib_disable_after_error(struct qib_devdata *);
+int qib_set_uevent_bits(struct qib_pportdata *, const int);
+
+/* for use in system calls, where we want to know device type, etc. */
+#define ctxt_fp(fp) \
+ (((struct qib_filedata *)(fp)->private_data)->rcd)
+#define subctxt_fp(fp) \
+ (((struct qib_filedata *)(fp)->private_data)->subctxt)
+#define tidcursor_fp(fp) \
+ (((struct qib_filedata *)(fp)->private_data)->tidcursor)
+#define user_sdma_queue_fp(fp) \
+ (((struct qib_filedata *)(fp)->private_data)->pq)
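+
+/*
+ * Example (illustrative) use of the accessors above from a file operation,
+ * where fp is the struct file passed in by the VFS:
+ *
+ * struct qib_ctxtdata *rcd = ctxt_fp(fp);
+ * unsigned subctxt = subctxt_fp(fp);
+ */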
+
+static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd)
+{
+ return ppd->dd;
+}
+
+static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev)
+{
+ return container_of(dev, struct qib_devdata, verbs_dev);
+}
+
+static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev)
+{
+ return dd_from_dev(to_idev(ibdev));
+}
+
+static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
+{
+ return container_of(ibp, struct qib_pportdata, ibport_data);
+}
+
+static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
+
+ WARN_ON(pidx >= dd->num_pports);
+ return &dd->pport[pidx].ibport_data;
+}
+
+/*
+ * values for dd->flags (_device_ related flags)
+ */
+#define QIB_HAS_LINK_LATENCY 0x1 /* supports link latency (IB 1.2) */
+#define QIB_INITTED 0x2 /* chip and driver up and initted */
+#define QIB_DOING_RESET 0x4 /* in the middle of doing chip reset */
+#define QIB_PRESENT 0x8 /* chip accesses can be done */
+#define QIB_PIO_FLUSH_WC 0x10 /* Needs Write combining flush for PIO */
+#define QIB_HAS_THRESH_UPDATE 0x40
+#define QIB_HAS_SDMA_TIMEOUT 0x80
+#define QIB_USE_SPCL_TRIG 0x100 /* SpecialTrigger launch enabled */
+#define QIB_NODMA_RTAIL 0x200 /* rcvhdrtail not DMA'ed; read via RHF seq instead */
+#define QIB_HAS_INTX 0x800 /* Supports INTx interrupts */
+#define QIB_HAS_SEND_DMA 0x1000 /* Supports Send DMA */
+#define QIB_HAS_VLSUPP 0x2000 /* Supports multiple VLs; PBC different */
+#define QIB_HAS_HDRSUPP 0x4000 /* Supports header suppression */
+#define QIB_BADINTR 0x8000 /* severe interrupt problems */
+#define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */
+#define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */
+
+/*
+ * values for ppd->lflags (_ib_port_ related flags)
+ */
+#define QIBL_LINKV 0x1 /* IB link state valid */
+#define QIBL_LINKDOWN 0x8 /* IB link is down */
+#define QIBL_LINKINIT 0x10 /* IB link level is up */
+#define QIBL_LINKARMED 0x20 /* IB link is ARMED */
+#define QIBL_LINKACTIVE 0x40 /* IB link is ACTIVE */
+/* leave a gap for more IB-link state */
+#define QIBL_IB_AUTONEG_INPROG 0x1000 /* non-IBTA DDR/QDR neg active */
+#define QIBL_IB_AUTONEG_FAILED 0x2000 /* non-IBTA DDR/QDR neg failed */
+#define QIBL_IB_LINK_DISABLED 0x4000 /* Linkdown-disable forced,
+ * Do not try to bring up */
+#define QIBL_IB_FORCE_NOTIFY 0x8000 /* force notify on next ib change */
+
+/* IB dword length mask in PBC (lower 11 bits); same for all chips */
+#define QIB_PBC_LENGTH_MASK ((1 << 11) - 1)
+
+
+/* ctxt_flag bit offsets */
+ /* waiting for a packet to arrive */
+#define QIB_CTXT_WAITING_RCV 2
+ /* master has not finished initializing */
+#define QIB_CTXT_MASTER_UNINIT 4
+ /* waiting for an urgent packet to arrive */
+#define QIB_CTXT_WAITING_URG 5
+
+/* free up any allocated data at close */
+void qib_free_data(struct qib_ctxtdata *dd);
+void qib_chg_pioavailkernel(struct qib_devdata *, unsigned, unsigned,
+ u32, struct qib_ctxtdata *);
+struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *,
+ const struct pci_device_id *);
+struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *,
+ const struct pci_device_id *);
+struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *,
+ const struct pci_device_id *);
+void qib_free_devdata(struct qib_devdata *);
+struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra);
+
+#define QIB_TWSI_NO_DEV 0xFF
+/* Below qib_twsi_ functions must be called with eep_lock held */
+int qib_twsi_reset(struct qib_devdata *dd);
+int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
+ int len);
+int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
+ const void *buffer, int len);
+void qib_get_eeprom_info(struct qib_devdata *);
+int qib_update_eeprom_log(struct qib_devdata *dd);
+void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
+void qib_dump_lookup_output_queue(struct qib_devdata *);
+void qib_force_pio_avail_update(struct qib_devdata *);
+void qib_clear_symerror_on_linkup(unsigned long opaque);
+
+/*
+ * Set LED override, only the two LSBs have "public" meaning, but
+ * any non-zero value substitutes them for the Link and LinkTrain
+ * LED states.
+ */
+#define QIB_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
+#define QIB_LED_LOG 2 /* Logical (link) YELLOW LED */
+void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val);
+
+/* send dma routines */
+int qib_setup_sdma(struct qib_pportdata *);
+void qib_teardown_sdma(struct qib_pportdata *);
+void __qib_sdma_intr(struct qib_pportdata *);
+void qib_sdma_intr(struct qib_pportdata *);
+int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
+ u32, struct qib_verbs_txreq *);
+/* ppd->sdma_lock should be locked before calling this. */
+int qib_sdma_make_progress(struct qib_pportdata *dd);
+
+/* must be called under qib_sdma_lock */
+static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
+{
+ return ppd->sdma_descq_cnt -
+ (ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1;
+}
+
+static inline int __qib_sdma_running(struct qib_pportdata *ppd)
+{
+ return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
+}
+int qib_sdma_running(struct qib_pportdata *);
+
+void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
+void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
+
+/*
+ * number of words used for protocol header if not set by qib_userinit();
+ */
+#define QIB_DFLT_RCVHDRSIZE 9
+
+/*
+ * We need to be able to handle an IB header of at least 24 dwords.
+ * We need the rcvhdrq large enough to handle largest IB header, but
+ * still have room for a 2KB MTU standard IB packet.
+ * Additionally, some processor/memory controller combinations
+ * benefit quite strongly from having the DMA'ed data be cacheline
+ * aligned and a cacheline multiple, so we set the size to 32 dwords
+ * (2 64-byte primary cachelines for pretty much all processors of
+ * interest). The alignment hurts nothing, other than using somewhat
+ * more memory.
+ */
+#define QIB_RCVHDR_ENTSIZE 32
+
+int qib_get_user_pages(unsigned long, size_t, struct page **);
+void qib_release_user_pages(struct page **, size_t);
+int qib_eeprom_read(struct qib_devdata *, u8, void *, int);
+int qib_eeprom_write(struct qib_devdata *, u8, const void *, int);
+u32 __iomem *qib_getsendbuf_range(struct qib_devdata *, u32 *, u32, u32);
+void qib_sendbuf_done(struct qib_devdata *, unsigned);
+
+static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd)
+{
+ *((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
+}
+
+static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
+{
+ /*
+ * volatile because it's a DMA target from the chip, routine is
+ * inlined, and don't want register caching or reordering.
+ */
+ return (u32) le64_to_cpu(
+ *((volatile __le64 *)rcd->rcvhdrtail_kvaddr)); /* DMA'ed */
+}
+
+static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
+{
+ const struct qib_devdata *dd = rcd->dd;
+ u32 hdrqtail;
+
+ if (dd->flags & QIB_NODMA_RTAIL) {
+ __le32 *rhf_addr;
+ u32 seq;
+
+ rhf_addr = (__le32 *) rcd->rcvhdrq +
+ rcd->head + dd->rhf_offset;
+ seq = qib_hdrget_seq(rhf_addr);
+ hdrqtail = rcd->head;
+ if (seq == rcd->seq_cnt)
+ hdrqtail++;
+ } else
+ hdrqtail = qib_get_rcvhdrtail(rcd);
+
+ return hdrqtail;
+}
+
+/*
+ * sysfs interface.
+ */
+
+extern const char ib_qib_version[];
+
+int qib_device_create(struct qib_devdata *);
+void qib_device_remove(struct qib_devdata *);
+
+int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
+ struct kobject *kobj);
+int qib_verbs_register_sysfs(struct qib_devdata *);
+void qib_verbs_unregister_sysfs(struct qib_devdata *);
+/* Hook for sysfs read of QSFP */
+extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
+
+int __init qib_init_qibfs(void);
+int __exit qib_exit_qibfs(void);
+
+int qibfs_add(struct qib_devdata *);
+int qibfs_remove(struct qib_devdata *);
+
+int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
+int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
+ const struct pci_device_id *);
+void qib_pcie_ddcleanup(struct qib_devdata *);
+int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct msix_entry *);
+int qib_reinit_intr(struct qib_devdata *);
+void qib_enable_intx(struct pci_dev *);
+void qib_nomsi(struct qib_devdata *);
+void qib_nomsix(struct qib_devdata *);
+void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
+void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
+
+/*
+ * dma_addr wrappers - all 0's invalid for hw
+ */
+dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
+ size_t, int);
+const char *qib_get_unit_name(int unit);
+
+/*
+ * Flush write combining store buffers (if present) and perform a write
+ * barrier.
+ */
+#if defined(CONFIG_X86_64)
+#define qib_flush_wc() asm volatile("sfence" : : : "memory")
+#else
+#define qib_flush_wc() wmb() /* no reorder around wc flush */
+#endif
+
+/* global module parameter variables */
+extern unsigned qib_ibmtu;
+extern ushort qib_cfgctxts;
+extern ushort qib_num_cfg_vls;
+extern ushort qib_mini_init; /* If set, do few (ideally 0) writes to chip */
+extern unsigned qib_n_krcv_queues;
+extern unsigned qib_sdma_fetch_arb;
+extern unsigned qib_compat_ddr_negotiate;
+extern int qib_special_trigger;
+
+extern struct mutex qib_mutex;
+
+/* Number of seconds before our card status check... */
+#define STATUS_TIMEOUT 60
+
+#define QIB_DRV_NAME "ib_qib"
+#define QIB_USER_MINOR_BASE 0
+#define QIB_TRACE_MINOR 127
+#define QIB_DIAGPKT_MINOR 128
+#define QIB_DIAG_MINOR_BASE 129
+#define QIB_NMINORS 255
+
+#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
+#define PCI_VENDOR_ID_QLOGIC 0x1077
+#define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10
+#define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220
+#define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322
+
+/*
+ * qib_early_err is used (only!) to print early errors before devdata is
+ * allocated, or when dd->pcidev may not be valid, and at the tail end of
+ * cleanup when devdata may have been freed, etc. qib_dev_porterr is
+ * the same as qib_dev_err, but is used when the message really needs
+ * the IB port# to be definitive as to what's happening.
+ * All of these go to the trace log, and the trace log entry is done
+ * first to avoid possible serial port delays from printk.
+ */
+#define qib_early_err(dev, fmt, ...) \
+ do { \
+ dev_info(dev, KERN_ERR QIB_DRV_NAME ": " fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define qib_dev_err(dd, fmt, ...) \
+ do { \
+ dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
+ qib_get_unit_name((dd)->unit), ##__VA_ARGS__); \
+ } while (0)
+
+#define qib_dev_porterr(dd, port, fmt, ...) \
+ do { \
+ dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
+ qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define qib_devinfo(pcidev, fmt, ...) \
+ do { \
+ dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__); \
+ } while (0)
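+
+/*
+ * Example (illustrative) use of the message macros above; the message
+ * strings are made up for the example:
+ *
+ * qib_dev_err(dd, "chip reset failed\n");
+ * qib_dev_porterr(dd, ppd->port, "link went down unexpectedly\n");
+ * qib_devinfo(dd->pcidev, "%u ctxts configured\n", dd->cfgctxts);
+ */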
+
+/*
+ * this is used for formatting hw error messages...
+ */
+struct qib_hwerror_msgs {
+ u64 mask;
+ const char *msg;
+};
+
+#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
+
+/* in qib_intr.c... */
+void qib_format_hwerrors(u64 hwerrs,
+ const struct qib_hwerror_msgs *hwerrmsgs,
+ size_t nhwerrmsgs, char *msg, size_t lmsg);
+#endif /* _QIB_KERNEL_H */
diff --git a/drivers/infiniband/hw/qib/qib_6120_regs.h b/drivers/infiniband/hw/qib/qib_6120_regs.h
new file mode 100644
index 000000000000..e16cb6f7de2c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_6120_regs.h
@@ -0,0 +1,977 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
+
+#define QIB_6120_Revision_OFFS 0x0
+#define QIB_6120_Revision_R_Simulator_LSB 0x3F
+#define QIB_6120_Revision_R_Simulator_RMASK 0x1
+#define QIB_6120_Revision_Reserved_LSB 0x28
+#define QIB_6120_Revision_Reserved_RMASK 0x7FFFFF
+#define QIB_6120_Revision_BoardID_LSB 0x20
+#define QIB_6120_Revision_BoardID_RMASK 0xFF
+#define QIB_6120_Revision_R_SW_LSB 0x18
+#define QIB_6120_Revision_R_SW_RMASK 0xFF
+#define QIB_6120_Revision_R_Arch_LSB 0x10
+#define QIB_6120_Revision_R_Arch_RMASK 0xFF
+#define QIB_6120_Revision_R_ChipRevMajor_LSB 0x8
+#define QIB_6120_Revision_R_ChipRevMajor_RMASK 0xFF
+#define QIB_6120_Revision_R_ChipRevMinor_LSB 0x0
+#define QIB_6120_Revision_R_ChipRevMinor_RMASK 0xFF
+
+#define QIB_6120_Control_OFFS 0x8
+#define QIB_6120_Control_TxLatency_LSB 0x4
+#define QIB_6120_Control_TxLatency_RMASK 0x1
+#define QIB_6120_Control_PCIERetryBufDiagEn_LSB 0x3
+#define QIB_6120_Control_PCIERetryBufDiagEn_RMASK 0x1
+#define QIB_6120_Control_LinkEn_LSB 0x2
+#define QIB_6120_Control_LinkEn_RMASK 0x1
+#define QIB_6120_Control_FreezeMode_LSB 0x1
+#define QIB_6120_Control_FreezeMode_RMASK 0x1
+#define QIB_6120_Control_SyncReset_LSB 0x0
+#define QIB_6120_Control_SyncReset_RMASK 0x1
+
+#define QIB_6120_PageAlign_OFFS 0x10
+
+#define QIB_6120_PortCnt_OFFS 0x18
+
+#define QIB_6120_SendRegBase_OFFS 0x30
+
+#define QIB_6120_UserRegBase_OFFS 0x38
+
+#define QIB_6120_CntrRegBase_OFFS 0x40
+
+#define QIB_6120_Scratch_OFFS 0x48
+#define QIB_6120_Scratch_TopHalf_LSB 0x20
+#define QIB_6120_Scratch_TopHalf_RMASK 0xFFFFFFFF
+#define QIB_6120_Scratch_BottomHalf_LSB 0x0
+#define QIB_6120_Scratch_BottomHalf_RMASK 0xFFFFFFFF
+
+#define QIB_6120_IntBlocked_OFFS 0x60
+#define QIB_6120_IntBlocked_ErrorIntBlocked_LSB 0x1F
+#define QIB_6120_IntBlocked_ErrorIntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_PioSetIntBlocked_LSB 0x1E
+#define QIB_6120_IntBlocked_PioSetIntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_LSB 0x1D
+#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_assertGPIOIntBlocked_LSB 0x1C
+#define QIB_6120_IntBlocked_assertGPIOIntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_Reserved_LSB 0xF
+#define QIB_6120_IntBlocked_Reserved_RMASK 0x1FFF
+#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_LSB 0x10
+#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_LSB 0xF
+#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_LSB 0xE
+#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_LSB 0xD
+#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_LSB 0xC
+#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_Reserved1_LSB 0x5
+#define QIB_6120_IntBlocked_Reserved1_RMASK 0x7F
+#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_LSB 0x4
+#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_LSB 0x3
+#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_LSB 0x2
+#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_LSB 0x1
+#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_LSB 0x0
+#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_RMASK 0x1
+
+#define QIB_6120_IntMask_OFFS 0x68
+#define QIB_6120_IntMask_ErrorIntMask_LSB 0x1F
+#define QIB_6120_IntMask_ErrorIntMask_RMASK 0x1
+#define QIB_6120_IntMask_PioSetIntMask_LSB 0x1E
+#define QIB_6120_IntMask_PioSetIntMask_RMASK 0x1
+#define QIB_6120_IntMask_PioBufAvailIntMask_LSB 0x1D
+#define QIB_6120_IntMask_PioBufAvailIntMask_RMASK 0x1
+#define QIB_6120_IntMask_assertGPIOIntMask_LSB 0x1C
+#define QIB_6120_IntMask_assertGPIOIntMask_RMASK 0x1
+#define QIB_6120_IntMask_Reserved_LSB 0x11
+#define QIB_6120_IntMask_Reserved_RMASK 0x7FF
+#define QIB_6120_IntMask_RcvAvail4IntMask_LSB 0x10
+#define QIB_6120_IntMask_RcvAvail4IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvAvail3IntMask_LSB 0xF
+#define QIB_6120_IntMask_RcvAvail3IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvAvail2IntMask_LSB 0xE
+#define QIB_6120_IntMask_RcvAvail2IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvAvail1IntMask_LSB 0xD
+#define QIB_6120_IntMask_RcvAvail1IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvAvail0IntMask_LSB 0xC
+#define QIB_6120_IntMask_RcvAvail0IntMask_RMASK 0x1
+#define QIB_6120_IntMask_Reserved1_LSB 0x5
+#define QIB_6120_IntMask_Reserved1_RMASK 0x7F
+#define QIB_6120_IntMask_RcvUrg4IntMask_LSB 0x4
+#define QIB_6120_IntMask_RcvUrg4IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvUrg3IntMask_LSB 0x3
+#define QIB_6120_IntMask_RcvUrg3IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvUrg2IntMask_LSB 0x2
+#define QIB_6120_IntMask_RcvUrg2IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvUrg1IntMask_LSB 0x1
+#define QIB_6120_IntMask_RcvUrg1IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvUrg0IntMask_LSB 0x0
+#define QIB_6120_IntMask_RcvUrg0IntMask_RMASK 0x1
+
+#define QIB_6120_IntStatus_OFFS 0x70
+#define QIB_6120_IntStatus_Error_LSB 0x1F
+#define QIB_6120_IntStatus_Error_RMASK 0x1
+#define QIB_6120_IntStatus_PioSent_LSB 0x1E
+#define QIB_6120_IntStatus_PioSent_RMASK 0x1
+#define QIB_6120_IntStatus_PioBufAvail_LSB 0x1D
+#define QIB_6120_IntStatus_PioBufAvail_RMASK 0x1
+#define QIB_6120_IntStatus_assertGPIO_LSB 0x1C
+#define QIB_6120_IntStatus_assertGPIO_RMASK 0x1
+#define QIB_6120_IntStatus_Reserved_LSB 0xF
+#define QIB_6120_IntStatus_Reserved_RMASK 0x1FFF
+#define QIB_6120_IntStatus_RcvAvail4_LSB 0x10
+#define QIB_6120_IntStatus_RcvAvail4_RMASK 0x1
+#define QIB_6120_IntStatus_RcvAvail3_LSB 0xF
+#define QIB_6120_IntStatus_RcvAvail3_RMASK 0x1
+#define QIB_6120_IntStatus_RcvAvail2_LSB 0xE
+#define QIB_6120_IntStatus_RcvAvail2_RMASK 0x1
+#define QIB_6120_IntStatus_RcvAvail1_LSB 0xD
+#define QIB_6120_IntStatus_RcvAvail1_RMASK 0x1
+#define QIB_6120_IntStatus_RcvAvail0_LSB 0xC
+#define QIB_6120_IntStatus_RcvAvail0_RMASK 0x1
+#define QIB_6120_IntStatus_Reserved1_LSB 0x5
+#define QIB_6120_IntStatus_Reserved1_RMASK 0x7F
+#define QIB_6120_IntStatus_RcvUrg4_LSB 0x4
+#define QIB_6120_IntStatus_RcvUrg4_RMASK 0x1
+#define QIB_6120_IntStatus_RcvUrg3_LSB 0x3
+#define QIB_6120_IntStatus_RcvUrg3_RMASK 0x1
+#define QIB_6120_IntStatus_RcvUrg2_LSB 0x2
+#define QIB_6120_IntStatus_RcvUrg2_RMASK 0x1
+#define QIB_6120_IntStatus_RcvUrg1_LSB 0x1
+#define QIB_6120_IntStatus_RcvUrg1_RMASK 0x1
+#define QIB_6120_IntStatus_RcvUrg0_LSB 0x0
+#define QIB_6120_IntStatus_RcvUrg0_RMASK 0x1
+
+#define QIB_6120_IntClear_OFFS 0x78
+#define QIB_6120_IntClear_ErrorIntClear_LSB 0x1F
+#define QIB_6120_IntClear_ErrorIntClear_RMASK 0x1
+#define QIB_6120_IntClear_PioSetIntClear_LSB 0x1E
+#define QIB_6120_IntClear_PioSetIntClear_RMASK 0x1
+#define QIB_6120_IntClear_PioBufAvailIntClear_LSB 0x1D
+#define QIB_6120_IntClear_PioBufAvailIntClear_RMASK 0x1
+#define QIB_6120_IntClear_assertGPIOIntClear_LSB 0x1C
+#define QIB_6120_IntClear_assertGPIOIntClear_RMASK 0x1
+#define QIB_6120_IntClear_Reserved_LSB 0xF
+#define QIB_6120_IntClear_Reserved_RMASK 0x1FFF
+#define QIB_6120_IntClear_RcvAvail4IntClear_LSB 0x10
+#define QIB_6120_IntClear_RcvAvail4IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvAvail3IntClear_LSB 0xF
+#define QIB_6120_IntClear_RcvAvail3IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvAvail2IntClear_LSB 0xE
+#define QIB_6120_IntClear_RcvAvail2IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvAvail1IntClear_LSB 0xD
+#define QIB_6120_IntClear_RcvAvail1IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvAvail0IntClear_LSB 0xC
+#define QIB_6120_IntClear_RcvAvail0IntClear_RMASK 0x1
+#define QIB_6120_IntClear_Reserved1_LSB 0x5
+#define QIB_6120_IntClear_Reserved1_RMASK 0x7F
+#define QIB_6120_IntClear_RcvUrg4IntClear_LSB 0x4
+#define QIB_6120_IntClear_RcvUrg4IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvUrg3IntClear_LSB 0x3
+#define QIB_6120_IntClear_RcvUrg3IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvUrg2IntClear_LSB 0x2
+#define QIB_6120_IntClear_RcvUrg2IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvUrg1IntClear_LSB 0x1
+#define QIB_6120_IntClear_RcvUrg1IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvUrg0IntClear_LSB 0x0
+#define QIB_6120_IntClear_RcvUrg0IntClear_RMASK 0x1
+
+#define QIB_6120_ErrMask_OFFS 0x80
+#define QIB_6120_ErrMask_Reserved_LSB 0x34
+#define QIB_6120_ErrMask_Reserved_RMASK 0xFFF
+#define QIB_6120_ErrMask_HardwareErrMask_LSB 0x33
+#define QIB_6120_ErrMask_HardwareErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_ResetNegatedMask_LSB 0x32
+#define QIB_6120_ErrMask_ResetNegatedMask_RMASK 0x1
+#define QIB_6120_ErrMask_InvalidAddrErrMask_LSB 0x31
+#define QIB_6120_ErrMask_InvalidAddrErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_IBStatusChangedMask_LSB 0x30
+#define QIB_6120_ErrMask_IBStatusChangedMask_RMASK 0x1
+#define QIB_6120_ErrMask_Reserved1_LSB 0x26
+#define QIB_6120_ErrMask_Reserved1_RMASK 0x3FF
+#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
+#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
+#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
+#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
+#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
+#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendPktLenErrMask_LSB 0x20
+#define QIB_6120_ErrMask_SendPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendUnderRunErrMask_LSB 0x1F
+#define QIB_6120_ErrMask_SendUnderRunErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
+#define QIB_6120_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendMinPktLenErrMask_LSB 0x1D
+#define QIB_6120_ErrMask_SendMinPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_Reserved2_LSB 0x12
+#define QIB_6120_ErrMask_Reserved2_RMASK 0x7FF
+#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
+#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvHdrErrMask_LSB 0x10
+#define QIB_6120_ErrMask_RcvHdrErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvHdrLenErrMask_LSB 0xF
+#define QIB_6120_ErrMask_RcvHdrLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvBadTidErrMask_LSB 0xE
+#define QIB_6120_ErrMask_RcvBadTidErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvHdrFullErrMask_LSB 0xD
+#define QIB_6120_ErrMask_RcvHdrFullErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvEgrFullErrMask_LSB 0xC
+#define QIB_6120_ErrMask_RcvEgrFullErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvBadVersionErrMask_LSB 0xB
+#define QIB_6120_ErrMask_RcvBadVersionErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvIBFlowErrMask_LSB 0xA
+#define QIB_6120_ErrMask_RcvIBFlowErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvEBPErrMask_LSB 0x9
+#define QIB_6120_ErrMask_RcvEBPErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
+#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
+#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvShortPktLenErrMask_LSB 0x6
+#define QIB_6120_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvLongPktLenErrMask_LSB 0x5
+#define QIB_6120_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
+#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvMinPktLenErrMask_LSB 0x3
+#define QIB_6120_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvICRCErrMask_LSB 0x2
+#define QIB_6120_ErrMask_RcvICRCErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvVCRCErrMask_LSB 0x1
+#define QIB_6120_ErrMask_RcvVCRCErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvFormatErrMask_LSB 0x0
+#define QIB_6120_ErrMask_RcvFormatErrMask_RMASK 0x1
+
+#define QIB_6120_ErrStatus_OFFS 0x88
+#define QIB_6120_ErrStatus_Reserved_LSB 0x34
+#define QIB_6120_ErrStatus_Reserved_RMASK 0xFFF
+#define QIB_6120_ErrStatus_HardwareErr_LSB 0x33
+#define QIB_6120_ErrStatus_HardwareErr_RMASK 0x1
+#define QIB_6120_ErrStatus_ResetNegated_LSB 0x32
+#define QIB_6120_ErrStatus_ResetNegated_RMASK 0x1
+#define QIB_6120_ErrStatus_InvalidAddrErr_LSB 0x31
+#define QIB_6120_ErrStatus_InvalidAddrErr_RMASK 0x1
+#define QIB_6120_ErrStatus_IBStatusChanged_LSB 0x30
+#define QIB_6120_ErrStatus_IBStatusChanged_RMASK 0x1
+#define QIB_6120_ErrStatus_Reserved1_LSB 0x26
+#define QIB_6120_ErrStatus_Reserved1_RMASK 0x3FF
+#define QIB_6120_ErrStatus_SendUnsupportedVLErr_LSB 0x25
+#define QIB_6120_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
+#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendPioArmLaunchErr_LSB 0x23
+#define QIB_6120_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendDroppedDataPktErr_LSB 0x22
+#define QIB_6120_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
+#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendPktLenErr_LSB 0x20
+#define QIB_6120_ErrStatus_SendPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendUnderRunErr_LSB 0x1F
+#define QIB_6120_ErrStatus_SendUnderRunErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendMaxPktLenErr_LSB 0x1E
+#define QIB_6120_ErrStatus_SendMaxPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendMinPktLenErr_LSB 0x1D
+#define QIB_6120_ErrStatus_SendMinPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_Reserved2_LSB 0x12
+#define QIB_6120_ErrStatus_Reserved2_RMASK 0x7FF
+#define QIB_6120_ErrStatus_RcvIBLostLinkErr_LSB 0x11
+#define QIB_6120_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvHdrErr_LSB 0x10
+#define QIB_6120_ErrStatus_RcvHdrErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvHdrLenErr_LSB 0xF
+#define QIB_6120_ErrStatus_RcvHdrLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvBadTidErr_LSB 0xE
+#define QIB_6120_ErrStatus_RcvBadTidErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvHdrFullErr_LSB 0xD
+#define QIB_6120_ErrStatus_RcvHdrFullErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvEgrFullErr_LSB 0xC
+#define QIB_6120_ErrStatus_RcvEgrFullErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvBadVersionErr_LSB 0xB
+#define QIB_6120_ErrStatus_RcvBadVersionErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvIBFlowErr_LSB 0xA
+#define QIB_6120_ErrStatus_RcvIBFlowErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvEBPErr_LSB 0x9
+#define QIB_6120_ErrStatus_RcvEBPErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
+#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
+#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvShortPktLenErr_LSB 0x6
+#define QIB_6120_ErrStatus_RcvShortPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvLongPktLenErr_LSB 0x5
+#define QIB_6120_ErrStatus_RcvLongPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvMaxPktLenErr_LSB 0x4
+#define QIB_6120_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvMinPktLenErr_LSB 0x3
+#define QIB_6120_ErrStatus_RcvMinPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvICRCErr_LSB 0x2
+#define QIB_6120_ErrStatus_RcvICRCErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvVCRCErr_LSB 0x1
+#define QIB_6120_ErrStatus_RcvVCRCErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvFormatErr_LSB 0x0
+#define QIB_6120_ErrStatus_RcvFormatErr_RMASK 0x1
+
+#define QIB_6120_ErrClear_OFFS 0x90
+#define QIB_6120_ErrClear_Reserved_LSB 0x34
+#define QIB_6120_ErrClear_Reserved_RMASK 0xFFF
+#define QIB_6120_ErrClear_HardwareErrClear_LSB 0x33
+#define QIB_6120_ErrClear_HardwareErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_ResetNegatedClear_LSB 0x32
+#define QIB_6120_ErrClear_ResetNegatedClear_RMASK 0x1
+#define QIB_6120_ErrClear_InvalidAddrErrClear_LSB 0x31
+#define QIB_6120_ErrClear_InvalidAddrErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_IBStatusChangedClear_LSB 0x30
+#define QIB_6120_ErrClear_IBStatusChangedClear_RMASK 0x1
+#define QIB_6120_ErrClear_Reserved1_LSB 0x26
+#define QIB_6120_ErrClear_Reserved1_RMASK 0x3FF
+#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
+#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
+#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
+#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
+#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
+#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendPktLenErrClear_LSB 0x20
+#define QIB_6120_ErrClear_SendPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendUnderRunErrClear_LSB 0x1F
+#define QIB_6120_ErrClear_SendUnderRunErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
+#define QIB_6120_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendMinPktLenErrClear_LSB 0x1D
+#define QIB_6120_ErrClear_SendMinPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_Reserved2_LSB 0x12
+#define QIB_6120_ErrClear_Reserved2_RMASK 0x7FF
+#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
+#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvHdrErrClear_LSB 0x10
+#define QIB_6120_ErrClear_RcvHdrErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvHdrLenErrClear_LSB 0xF
+#define QIB_6120_ErrClear_RcvHdrLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvBadTidErrClear_LSB 0xE
+#define QIB_6120_ErrClear_RcvBadTidErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvHdrFullErrClear_LSB 0xD
+#define QIB_6120_ErrClear_RcvHdrFullErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvEgrFullErrClear_LSB 0xC
+#define QIB_6120_ErrClear_RcvEgrFullErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvBadVersionErrClear_LSB 0xB
+#define QIB_6120_ErrClear_RcvBadVersionErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvIBFlowErrClear_LSB 0xA
+#define QIB_6120_ErrClear_RcvIBFlowErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvEBPErrClear_LSB 0x9
+#define QIB_6120_ErrClear_RcvEBPErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
+#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
+#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvShortPktLenErrClear_LSB 0x6
+#define QIB_6120_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvLongPktLenErrClear_LSB 0x5
+#define QIB_6120_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
+#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvMinPktLenErrClear_LSB 0x3
+#define QIB_6120_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvICRCErrClear_LSB 0x2
+#define QIB_6120_ErrClear_RcvICRCErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvVCRCErrClear_LSB 0x1
+#define QIB_6120_ErrClear_RcvVCRCErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvFormatErrClear_LSB 0x0
+#define QIB_6120_ErrClear_RcvFormatErrClear_RMASK 0x1
+
+#define QIB_6120_HwErrMask_OFFS 0x98
+#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
+#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
+#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
+#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
+#define QIB_6120_HwErrMask_Reserved_LSB 0x3D
+#define QIB_6120_HwErrMask_Reserved_RMASK 0x1
+#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
+#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
+#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x3B
+#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
+#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x3A
+#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
+#define QIB_6120_HwErrMask_Reserved1_LSB 0x39
+#define QIB_6120_HwErrMask_Reserved1_RMASK 0x1
+#define QIB_6120_HwErrMask_IBPLLrfSlipMask_LSB 0x38
+#define QIB_6120_HwErrMask_IBPLLrfSlipMask_RMASK 0x1
+#define QIB_6120_HwErrMask_IBPLLfbSlipMask_LSB 0x37
+#define QIB_6120_HwErrMask_IBPLLfbSlipMask_RMASK 0x1
+#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
+#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
+#define QIB_6120_HwErrMask_Reserved2_LSB 0x33
+#define QIB_6120_HwErrMask_Reserved2_RMASK 0x7
+#define QIB_6120_HwErrMask_RXEMemParityErrMask_LSB 0x2C
+#define QIB_6120_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
+#define QIB_6120_HwErrMask_TXEMemParityErrMask_LSB 0x28
+#define QIB_6120_HwErrMask_TXEMemParityErrMask_RMASK 0xF
+#define QIB_6120_HwErrMask_Reserved3_LSB 0x22
+#define QIB_6120_HwErrMask_Reserved3_RMASK 0x3F
+#define QIB_6120_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
+#define QIB_6120_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
+#define QIB_6120_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
+#define QIB_6120_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
+#define QIB_6120_HwErrMask_PoisonedTLPMask_LSB 0x1D
+#define QIB_6120_HwErrMask_PoisonedTLPMask_RMASK 0x1
+#define QIB_6120_HwErrMask_Reserved4_LSB 0x6
+#define QIB_6120_HwErrMask_Reserved4_RMASK 0x7FFFFF
+#define QIB_6120_HwErrMask_PCIeMemParityErrMask_LSB 0x0
+#define QIB_6120_HwErrMask_PCIeMemParityErrMask_RMASK 0x3F
+
+#define QIB_6120_HwErrStatus_OFFS 0xA0
+#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
+#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
+#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
+#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
+#define QIB_6120_HwErrStatus_Reserved_LSB 0x3D
+#define QIB_6120_HwErrStatus_Reserved_RMASK 0x1
+#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
+#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
+#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x3B
+#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
+#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x3A
+#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
+#define QIB_6120_HwErrStatus_Reserved1_LSB 0x39
+#define QIB_6120_HwErrStatus_Reserved1_RMASK 0x1
+#define QIB_6120_HwErrStatus_IBPLLrfSlip_LSB 0x38
+#define QIB_6120_HwErrStatus_IBPLLrfSlip_RMASK 0x1
+#define QIB_6120_HwErrStatus_IBPLLfbSlip_LSB 0x37
+#define QIB_6120_HwErrStatus_IBPLLfbSlip_RMASK 0x1
+#define QIB_6120_HwErrStatus_PowerOnBISTFailed_LSB 0x36
+#define QIB_6120_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
+#define QIB_6120_HwErrStatus_Reserved2_LSB 0x33
+#define QIB_6120_HwErrStatus_Reserved2_RMASK 0x7
+#define QIB_6120_HwErrStatus_RXEMemParity_LSB 0x2C
+#define QIB_6120_HwErrStatus_RXEMemParity_RMASK 0x7F
+#define QIB_6120_HwErrStatus_TXEMemParity_LSB 0x28
+#define QIB_6120_HwErrStatus_TXEMemParity_RMASK 0xF
+#define QIB_6120_HwErrStatus_Reserved3_LSB 0x22
+#define QIB_6120_HwErrStatus_Reserved3_RMASK 0x3F
+#define QIB_6120_HwErrStatus_PCIeBusParity_LSB 0x1F
+#define QIB_6120_HwErrStatus_PCIeBusParity_RMASK 0x7
+#define QIB_6120_HwErrStatus_PcieCplTimeout_LSB 0x1E
+#define QIB_6120_HwErrStatus_PcieCplTimeout_RMASK 0x1
+#define QIB_6120_HwErrStatus_PoisenedTLP_LSB 0x1D
+#define QIB_6120_HwErrStatus_PoisenedTLP_RMASK 0x1
+#define QIB_6120_HwErrStatus_Reserved4_LSB 0x6
+#define QIB_6120_HwErrStatus_Reserved4_RMASK 0x7FFFFF
+#define QIB_6120_HwErrStatus_PCIeMemParity_LSB 0x0
+#define QIB_6120_HwErrStatus_PCIeMemParity_RMASK 0x3F
+
+#define QIB_6120_HwErrClear_OFFS 0xA8
+#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
+#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
+#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
+#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
+#define QIB_6120_HwErrClear_Reserved_LSB 0x3D
+#define QIB_6120_HwErrClear_Reserved_RMASK 0x1
+#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
+#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
+#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x3B
+#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
+#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x3A
+#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
+#define QIB_6120_HwErrClear_Reserved1_LSB 0x39
+#define QIB_6120_HwErrClear_Reserved1_RMASK 0x1
+#define QIB_6120_HwErrClear_IBPLLrfSlipClear_LSB 0x38
+#define QIB_6120_HwErrClear_IBPLLrfSlipClear_RMASK 0x1
+#define QIB_6120_HwErrClear_IBPLLfbSlipClear_LSB 0x37
+#define QIB_6120_HwErrClear_IBPLLfbSlipClear_RMASK 0x1
+#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
+#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
+#define QIB_6120_HwErrClear_Reserved2_LSB 0x33
+#define QIB_6120_HwErrClear_Reserved2_RMASK 0x7
+#define QIB_6120_HwErrClear_RXEMemParityClear_LSB 0x2C
+#define QIB_6120_HwErrClear_RXEMemParityClear_RMASK 0x7F
+#define QIB_6120_HwErrClear_TXEMemParityClear_LSB 0x28
+#define QIB_6120_HwErrClear_TXEMemParityClear_RMASK 0xF
+#define QIB_6120_HwErrClear_Reserved3_LSB 0x22
+#define QIB_6120_HwErrClear_Reserved3_RMASK 0x3F
+#define QIB_6120_HwErrClear_PCIeBusParityClr_LSB 0x1F
+#define QIB_6120_HwErrClear_PCIeBusParityClr_RMASK 0x7
+#define QIB_6120_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
+#define QIB_6120_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
+#define QIB_6120_HwErrClear_PoisonedTLPClear_LSB 0x1D
+#define QIB_6120_HwErrClear_PoisonedTLPClear_RMASK 0x1
+#define QIB_6120_HwErrClear_Reserved4_LSB 0x6
+#define QIB_6120_HwErrClear_Reserved4_RMASK 0x7FFFFF
+#define QIB_6120_HwErrClear_PCIeMemParityClr_LSB 0x0
+#define QIB_6120_HwErrClear_PCIeMemParityClr_RMASK 0x3F
+
+#define QIB_6120_HwDiagCtrl_OFFS 0xB0
+#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
+#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
+#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
+#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
+#define QIB_6120_HwDiagCtrl_CounterWrEnable_LSB 0x3D
+#define QIB_6120_HwDiagCtrl_CounterWrEnable_RMASK 0x1
+#define QIB_6120_HwDiagCtrl_CounterDisable_LSB 0x3C
+#define QIB_6120_HwDiagCtrl_CounterDisable_RMASK 0x1
+#define QIB_6120_HwDiagCtrl_Reserved_LSB 0x33
+#define QIB_6120_HwDiagCtrl_Reserved_RMASK 0x1FF
+#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
+#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
+#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
+#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
+#define QIB_6120_HwDiagCtrl_Reserved1_LSB 0x23
+#define QIB_6120_HwDiagCtrl_Reserved1_RMASK 0x1F
+#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
+#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
+#define QIB_6120_HwDiagCtrl_Reserved2_LSB 0x6
+#define QIB_6120_HwDiagCtrl_Reserved2_RMASK 0x1FFFFFF
+#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
+#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_RMASK 0x3F
+
+#define QIB_6120_IBCStatus_OFFS 0xC0
+#define QIB_6120_IBCStatus_TxCreditOk_LSB 0x1F
+#define QIB_6120_IBCStatus_TxCreditOk_RMASK 0x1
+#define QIB_6120_IBCStatus_TxReady_LSB 0x1E
+#define QIB_6120_IBCStatus_TxReady_RMASK 0x1
+#define QIB_6120_IBCStatus_Reserved_LSB 0x7
+#define QIB_6120_IBCStatus_Reserved_RMASK 0x7FFFFF
+#define QIB_6120_IBCStatus_LinkState_LSB 0x4
+#define QIB_6120_IBCStatus_LinkState_RMASK 0x7
+#define QIB_6120_IBCStatus_LinkTrainingState_LSB 0x0
+#define QIB_6120_IBCStatus_LinkTrainingState_RMASK 0xF
+
+#define QIB_6120_IBCCtrl_OFFS 0xC8
+#define QIB_6120_IBCCtrl_Loopback_LSB 0x3F
+#define QIB_6120_IBCCtrl_Loopback_RMASK 0x1
+#define QIB_6120_IBCCtrl_LinkDownDefaultState_LSB 0x3E
+#define QIB_6120_IBCCtrl_LinkDownDefaultState_RMASK 0x1
+#define QIB_6120_IBCCtrl_Reserved_LSB 0x2B
+#define QIB_6120_IBCCtrl_Reserved_RMASK 0x7FFFF
+#define QIB_6120_IBCCtrl_CreditScale_LSB 0x28
+#define QIB_6120_IBCCtrl_CreditScale_RMASK 0x7
+#define QIB_6120_IBCCtrl_OverrunThreshold_LSB 0x24
+#define QIB_6120_IBCCtrl_OverrunThreshold_RMASK 0xF
+#define QIB_6120_IBCCtrl_PhyerrThreshold_LSB 0x20
+#define QIB_6120_IBCCtrl_PhyerrThreshold_RMASK 0xF
+#define QIB_6120_IBCCtrl_Reserved1_LSB 0x1F
+#define QIB_6120_IBCCtrl_Reserved1_RMASK 0x1
+#define QIB_6120_IBCCtrl_MaxPktLen_LSB 0x14
+#define QIB_6120_IBCCtrl_MaxPktLen_RMASK 0x7FF
+#define QIB_6120_IBCCtrl_LinkCmd_LSB 0x12
+#define QIB_6120_IBCCtrl_LinkCmd_RMASK 0x3
+#define QIB_6120_IBCCtrl_LinkInitCmd_LSB 0x10
+#define QIB_6120_IBCCtrl_LinkInitCmd_RMASK 0x3
+#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
+#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
+#define QIB_6120_IBCCtrl_FlowCtrlPeriod_LSB 0x0
+#define QIB_6120_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
+
+#define QIB_6120_EXTStatus_OFFS 0xD0
+#define QIB_6120_EXTStatus_GPIOIn_LSB 0x30
+#define QIB_6120_EXTStatus_GPIOIn_RMASK 0xFFFF
+#define QIB_6120_EXTStatus_Reserved_LSB 0x20
+#define QIB_6120_EXTStatus_Reserved_RMASK 0xFFFF
+#define QIB_6120_EXTStatus_Reserved1_LSB 0x10
+#define QIB_6120_EXTStatus_Reserved1_RMASK 0xFFFF
+#define QIB_6120_EXTStatus_MemBISTFoundErr_LSB 0xF
+#define QIB_6120_EXTStatus_MemBISTFoundErr_RMASK 0x1
+#define QIB_6120_EXTStatus_MemBISTEndTest_LSB 0xE
+#define QIB_6120_EXTStatus_MemBISTEndTest_RMASK 0x1
+#define QIB_6120_EXTStatus_Reserved2_LSB 0x0
+#define QIB_6120_EXTStatus_Reserved2_RMASK 0x3FFF
+
+#define QIB_6120_EXTCtrl_OFFS 0xD8
+#define QIB_6120_EXTCtrl_GPIOOe_LSB 0x30
+#define QIB_6120_EXTCtrl_GPIOOe_RMASK 0xFFFF
+#define QIB_6120_EXTCtrl_GPIOInvert_LSB 0x20
+#define QIB_6120_EXTCtrl_GPIOInvert_RMASK 0xFFFF
+#define QIB_6120_EXTCtrl_Reserved_LSB 0x4
+#define QIB_6120_EXTCtrl_Reserved_RMASK 0xFFFFFFF
+#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
+#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
+#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
+#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
+#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
+#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
+#define QIB_6120_EXTCtrl_LEDGblErrRedOff_LSB 0x0
+#define QIB_6120_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
+
+#define QIB_6120_GPIOOut_OFFS 0xE0
+
+#define QIB_6120_GPIOMask_OFFS 0xE8
+
+#define QIB_6120_GPIOStatus_OFFS 0xF0
+
+#define QIB_6120_GPIOClear_OFFS 0xF8
+
+#define QIB_6120_RcvCtrl_OFFS 0x100
+#define QIB_6120_RcvCtrl_TailUpd_LSB 0x1F
+#define QIB_6120_RcvCtrl_TailUpd_RMASK 0x1
+#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_LSB 0x1E
+#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
+#define QIB_6120_RcvCtrl_Reserved_LSB 0x15
+#define QIB_6120_RcvCtrl_Reserved_RMASK 0x1FF
+#define QIB_6120_RcvCtrl_IntrAvail_LSB 0x10
+#define QIB_6120_RcvCtrl_IntrAvail_RMASK 0x1F
+#define QIB_6120_RcvCtrl_Reserved1_LSB 0x9
+#define QIB_6120_RcvCtrl_Reserved1_RMASK 0x7F
+#define QIB_6120_RcvCtrl_Reserved2_LSB 0x5
+#define QIB_6120_RcvCtrl_Reserved2_RMASK 0xF
+#define QIB_6120_RcvCtrl_PortEnable_LSB 0x0
+#define QIB_6120_RcvCtrl_PortEnable_RMASK 0x1F
+
+#define QIB_6120_RcvBTHQP_OFFS 0x108
+#define QIB_6120_RcvBTHQP_BTHQP_Mask_LSB 0x1E
+#define QIB_6120_RcvBTHQP_BTHQP_Mask_RMASK 0x3
+#define QIB_6120_RcvBTHQP_Reserved_LSB 0x18
+#define QIB_6120_RcvBTHQP_Reserved_RMASK 0x3F
+#define QIB_6120_RcvBTHQP_RcvBTHQP_LSB 0x0
+#define QIB_6120_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
+
+#define QIB_6120_RcvHdrSize_OFFS 0x110
+
+#define QIB_6120_RcvHdrCnt_OFFS 0x118
+
+#define QIB_6120_RcvHdrEntSize_OFFS 0x120
+
+#define QIB_6120_RcvTIDBase_OFFS 0x128
+
+#define QIB_6120_RcvTIDCnt_OFFS 0x130
+
+#define QIB_6120_RcvEgrBase_OFFS 0x138
+
+#define QIB_6120_RcvEgrCnt_OFFS 0x140
+
+#define QIB_6120_RcvBufBase_OFFS 0x148
+
+#define QIB_6120_RcvBufSize_OFFS 0x150
+
+#define QIB_6120_RxIntMemBase_OFFS 0x158
+
+#define QIB_6120_RxIntMemSize_OFFS 0x160
+
+#define QIB_6120_RcvPartitionKey_OFFS 0x168
+
+#define QIB_6120_RcvPktLEDCnt_OFFS 0x178
+#define QIB_6120_RcvPktLEDCnt_ONperiod_LSB 0x20
+#define QIB_6120_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
+#define QIB_6120_RcvPktLEDCnt_OFFperiod_LSB 0x0
+#define QIB_6120_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
+
+#define QIB_6120_SendCtrl_OFFS 0x1C0
+#define QIB_6120_SendCtrl_Disarm_LSB 0x1F
+#define QIB_6120_SendCtrl_Disarm_RMASK 0x1
+#define QIB_6120_SendCtrl_Reserved_LSB 0x17
+#define QIB_6120_SendCtrl_Reserved_RMASK 0xFF
+#define QIB_6120_SendCtrl_DisarmPIOBuf_LSB 0x10
+#define QIB_6120_SendCtrl_DisarmPIOBuf_RMASK 0x7F
+#define QIB_6120_SendCtrl_Reserved1_LSB 0x4
+#define QIB_6120_SendCtrl_Reserved1_RMASK 0xFFF
+#define QIB_6120_SendCtrl_PIOEnable_LSB 0x3
+#define QIB_6120_SendCtrl_PIOEnable_RMASK 0x1
+#define QIB_6120_SendCtrl_PIOBufAvailUpd_LSB 0x2
+#define QIB_6120_SendCtrl_PIOBufAvailUpd_RMASK 0x1
+#define QIB_6120_SendCtrl_PIOIntBufAvail_LSB 0x1
+#define QIB_6120_SendCtrl_PIOIntBufAvail_RMASK 0x1
+#define QIB_6120_SendCtrl_Abort_LSB 0x0
+#define QIB_6120_SendCtrl_Abort_RMASK 0x1
+
+#define QIB_6120_SendPIOBufBase_OFFS 0x1C8
+#define QIB_6120_SendPIOBufBase_Reserved_LSB 0x35
+#define QIB_6120_SendPIOBufBase_Reserved_RMASK 0x7FF
+#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_LSB 0x20
+#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
+#define QIB_6120_SendPIOBufBase_Reserved1_LSB 0x15
+#define QIB_6120_SendPIOBufBase_Reserved1_RMASK 0x7FF
+#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_LSB 0x0
+#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
+
+#define QIB_6120_SendPIOSize_OFFS 0x1D0
+#define QIB_6120_SendPIOSize_Reserved_LSB 0x2D
+#define QIB_6120_SendPIOSize_Reserved_RMASK 0xFFFFF
+#define QIB_6120_SendPIOSize_Size_LargePIO_LSB 0x20
+#define QIB_6120_SendPIOSize_Size_LargePIO_RMASK 0x1FFF
+#define QIB_6120_SendPIOSize_Reserved1_LSB 0xC
+#define QIB_6120_SendPIOSize_Reserved1_RMASK 0xFFFFF
+#define QIB_6120_SendPIOSize_Size_SmallPIO_LSB 0x0
+#define QIB_6120_SendPIOSize_Size_SmallPIO_RMASK 0xFFF
+
+#define QIB_6120_SendPIOBufCnt_OFFS 0x1D8
+#define QIB_6120_SendPIOBufCnt_Reserved_LSB 0x24
+#define QIB_6120_SendPIOBufCnt_Reserved_RMASK 0xFFFFFFF
+#define QIB_6120_SendPIOBufCnt_Num_LargePIO_LSB 0x20
+#define QIB_6120_SendPIOBufCnt_Num_LargePIO_RMASK 0xF
+#define QIB_6120_SendPIOBufCnt_Reserved1_LSB 0x9
+#define QIB_6120_SendPIOBufCnt_Reserved1_RMASK 0x7FFFFF
+#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_LSB 0x0
+#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_RMASK 0x1FF
+
+#define QIB_6120_SendPIOAvailAddr_OFFS 0x1E0
+#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_LSB 0x6
+#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_RMASK 0x3FFFFFFFF
+#define QIB_6120_SendPIOAvailAddr_Reserved_LSB 0x0
+#define QIB_6120_SendPIOAvailAddr_Reserved_RMASK 0x3F
+
+#define QIB_6120_SendBufErr0_OFFS 0x240
+#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_LSB 0x0
+#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_RMASK 0x0
+
+#define QIB_6120_RcvHdrAddr0_OFFS 0x280
+#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
+#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
+#define QIB_6120_RcvHdrAddr0_Reserved_LSB 0x0
+#define QIB_6120_RcvHdrAddr0_Reserved_RMASK 0x3
+
+#define QIB_6120_RcvHdrTailAddr0_OFFS 0x300
+#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
+#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
+#define QIB_6120_RcvHdrTailAddr0_Reserved_LSB 0x0
+#define QIB_6120_RcvHdrTailAddr0_Reserved_RMASK 0x3
+
+#define QIB_6120_SerdesCfg0_OFFS 0x3C0
+#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_LSB 0x3F
+#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_RMASK 0x1
+#define QIB_6120_SerdesCfg0_Reserved_LSB 0x38
+#define QIB_6120_SerdesCfg0_Reserved_RMASK 0x7F
+#define QIB_6120_SerdesCfg0_RxEqCtl_LSB 0x36
+#define QIB_6120_SerdesCfg0_RxEqCtl_RMASK 0x3
+#define QIB_6120_SerdesCfg0_TxTermAdj_LSB 0x34
+#define QIB_6120_SerdesCfg0_TxTermAdj_RMASK 0x3
+#define QIB_6120_SerdesCfg0_RxTermAdj_LSB 0x32
+#define QIB_6120_SerdesCfg0_RxTermAdj_RMASK 0x3
+#define QIB_6120_SerdesCfg0_TermAdj1_LSB 0x31
+#define QIB_6120_SerdesCfg0_TermAdj1_RMASK 0x1
+#define QIB_6120_SerdesCfg0_TermAdj0_LSB 0x30
+#define QIB_6120_SerdesCfg0_TermAdj0_RMASK 0x1
+#define QIB_6120_SerdesCfg0_LPBKA_LSB 0x2F
+#define QIB_6120_SerdesCfg0_LPBKA_RMASK 0x1
+#define QIB_6120_SerdesCfg0_LPBKB_LSB 0x2E
+#define QIB_6120_SerdesCfg0_LPBKB_RMASK 0x1
+#define QIB_6120_SerdesCfg0_LPBKC_LSB 0x2D
+#define QIB_6120_SerdesCfg0_LPBKC_RMASK 0x1
+#define QIB_6120_SerdesCfg0_LPBKD_LSB 0x2C
+#define QIB_6120_SerdesCfg0_LPBKD_RMASK 0x1
+#define QIB_6120_SerdesCfg0_PW_LSB 0x2B
+#define QIB_6120_SerdesCfg0_PW_RMASK 0x1
+#define QIB_6120_SerdesCfg0_RefSel_LSB 0x29
+#define QIB_6120_SerdesCfg0_RefSel_RMASK 0x3
+#define QIB_6120_SerdesCfg0_ParReset_LSB 0x28
+#define QIB_6120_SerdesCfg0_ParReset_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ParLPBK_LSB 0x27
+#define QIB_6120_SerdesCfg0_ParLPBK_RMASK 0x1
+#define QIB_6120_SerdesCfg0_OffsetEn_LSB 0x26
+#define QIB_6120_SerdesCfg0_OffsetEn_RMASK 0x1
+#define QIB_6120_SerdesCfg0_Offset_LSB 0x1E
+#define QIB_6120_SerdesCfg0_Offset_RMASK 0xFF
+#define QIB_6120_SerdesCfg0_L2PwrDn_LSB 0x1D
+#define QIB_6120_SerdesCfg0_L2PwrDn_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetPLL_LSB 0x1C
+#define QIB_6120_SerdesCfg0_ResetPLL_RMASK 0x1
+#define QIB_6120_SerdesCfg0_RxTermEnX_LSB 0x18
+#define QIB_6120_SerdesCfg0_RxTermEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_BeaconTxEnX_LSB 0x14
+#define QIB_6120_SerdesCfg0_BeaconTxEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_RxDetEnX_LSB 0x10
+#define QIB_6120_SerdesCfg0_RxDetEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_TxIdeEnX_LSB 0xC
+#define QIB_6120_SerdesCfg0_TxIdeEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_RxIdleEnX_LSB 0x8
+#define QIB_6120_SerdesCfg0_RxIdleEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_L1PwrDnA_LSB 0x7
+#define QIB_6120_SerdesCfg0_L1PwrDnA_RMASK 0x1
+#define QIB_6120_SerdesCfg0_L1PwrDnB_LSB 0x6
+#define QIB_6120_SerdesCfg0_L1PwrDnB_RMASK 0x1
+#define QIB_6120_SerdesCfg0_L1PwrDnC_LSB 0x5
+#define QIB_6120_SerdesCfg0_L1PwrDnC_RMASK 0x1
+#define QIB_6120_SerdesCfg0_L1PwrDnD_LSB 0x4
+#define QIB_6120_SerdesCfg0_L1PwrDnD_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetA_LSB 0x3
+#define QIB_6120_SerdesCfg0_ResetA_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetB_LSB 0x2
+#define QIB_6120_SerdesCfg0_ResetB_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetC_LSB 0x1
+#define QIB_6120_SerdesCfg0_ResetC_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetD_LSB 0x0
+#define QIB_6120_SerdesCfg0_ResetD_RMASK 0x1
+
+#define QIB_6120_SerdesStat_OFFS 0x3D0
+#define QIB_6120_SerdesStat_Reserved_LSB 0xC
+#define QIB_6120_SerdesStat_Reserved_RMASK 0xFFFFFFFFFFFFF
+#define QIB_6120_SerdesStat_BeaconDetA_LSB 0xB
+#define QIB_6120_SerdesStat_BeaconDetA_RMASK 0x1
+#define QIB_6120_SerdesStat_BeaconDetB_LSB 0xA
+#define QIB_6120_SerdesStat_BeaconDetB_RMASK 0x1
+#define QIB_6120_SerdesStat_BeaconDetC_LSB 0x9
+#define QIB_6120_SerdesStat_BeaconDetC_RMASK 0x1
+#define QIB_6120_SerdesStat_BeaconDetD_LSB 0x8
+#define QIB_6120_SerdesStat_BeaconDetD_RMASK 0x1
+#define QIB_6120_SerdesStat_RxDetA_LSB 0x7
+#define QIB_6120_SerdesStat_RxDetA_RMASK 0x1
+#define QIB_6120_SerdesStat_RxDetB_LSB 0x6
+#define QIB_6120_SerdesStat_RxDetB_RMASK 0x1
+#define QIB_6120_SerdesStat_RxDetC_LSB 0x5
+#define QIB_6120_SerdesStat_RxDetC_RMASK 0x1
+#define QIB_6120_SerdesStat_RxDetD_LSB 0x4
+#define QIB_6120_SerdesStat_RxDetD_RMASK 0x1
+#define QIB_6120_SerdesStat_TxIdleDetA_LSB 0x3
+#define QIB_6120_SerdesStat_TxIdleDetA_RMASK 0x1
+#define QIB_6120_SerdesStat_TxIdleDetB_LSB 0x2
+#define QIB_6120_SerdesStat_TxIdleDetB_RMASK 0x1
+#define QIB_6120_SerdesStat_TxIdleDetC_LSB 0x1
+#define QIB_6120_SerdesStat_TxIdleDetC_RMASK 0x1
+#define QIB_6120_SerdesStat_TxIdleDetD_LSB 0x0
+#define QIB_6120_SerdesStat_TxIdleDetD_RMASK 0x1
+
+#define QIB_6120_XGXSCfg_OFFS 0x3D8
+#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_LSB 0x3F
+#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_RMASK 0x1
+#define QIB_6120_XGXSCfg_Reserved_LSB 0x17
+#define QIB_6120_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFF
+#define QIB_6120_XGXSCfg_polarity_inv_LSB 0x13
+#define QIB_6120_XGXSCfg_polarity_inv_RMASK 0xF
+#define QIB_6120_XGXSCfg_link_sync_mask_LSB 0x9
+#define QIB_6120_XGXSCfg_link_sync_mask_RMASK 0x3FF
+#define QIB_6120_XGXSCfg_port_addr_LSB 0x4
+#define QIB_6120_XGXSCfg_port_addr_RMASK 0x1F
+#define QIB_6120_XGXSCfg_mdd_30_LSB 0x3
+#define QIB_6120_XGXSCfg_mdd_30_RMASK 0x1
+#define QIB_6120_XGXSCfg_xcv_resetn_LSB 0x2
+#define QIB_6120_XGXSCfg_xcv_resetn_RMASK 0x1
+#define QIB_6120_XGXSCfg_Reserved1_LSB 0x1
+#define QIB_6120_XGXSCfg_Reserved1_RMASK 0x1
+#define QIB_6120_XGXSCfg_tx_rx_resetn_LSB 0x0
+#define QIB_6120_XGXSCfg_tx_rx_resetn_RMASK 0x1
+
+#define QIB_6120_LBIntCnt_OFFS 0x12000
+
+#define QIB_6120_LBFlowStallCnt_OFFS 0x12008
+
+#define QIB_6120_TxUnsupVLErrCnt_OFFS 0x12018
+
+#define QIB_6120_TxDataPktCnt_OFFS 0x12020
+
+#define QIB_6120_TxFlowPktCnt_OFFS 0x12028
+
+#define QIB_6120_TxDwordCnt_OFFS 0x12030
+
+#define QIB_6120_TxLenErrCnt_OFFS 0x12038
+
+#define QIB_6120_TxMaxMinLenErrCnt_OFFS 0x12040
+
+#define QIB_6120_TxUnderrunCnt_OFFS 0x12048
+
+#define QIB_6120_TxFlowStallCnt_OFFS 0x12050
+
+#define QIB_6120_TxDroppedPktCnt_OFFS 0x12058
+
+#define QIB_6120_RxDroppedPktCnt_OFFS 0x12060
+
+#define QIB_6120_RxDataPktCnt_OFFS 0x12068
+
+#define QIB_6120_RxFlowPktCnt_OFFS 0x12070
+
+#define QIB_6120_RxDwordCnt_OFFS 0x12078
+
+#define QIB_6120_RxLenErrCnt_OFFS 0x12080
+
+#define QIB_6120_RxMaxMinLenErrCnt_OFFS 0x12088
+
+#define QIB_6120_RxICRCErrCnt_OFFS 0x12090
+
+#define QIB_6120_RxVCRCErrCnt_OFFS 0x12098
+
+#define QIB_6120_RxFlowCtrlErrCnt_OFFS 0x120A0
+
+#define QIB_6120_RxBadFormatCnt_OFFS 0x120A8
+
+#define QIB_6120_RxLinkProblemCnt_OFFS 0x120B0
+
+#define QIB_6120_RxEBPCnt_OFFS 0x120B8
+
+#define QIB_6120_RxLPCRCErrCnt_OFFS 0x120C0
+
+#define QIB_6120_RxBufOvflCnt_OFFS 0x120C8
+
+#define QIB_6120_RxTIDFullErrCnt_OFFS 0x120D0
+
+#define QIB_6120_RxTIDValidErrCnt_OFFS 0x120D8
+
+#define QIB_6120_RxPKeyMismatchCnt_OFFS 0x120E0
+
+#define QIB_6120_RxP0HdrEgrOvflCnt_OFFS 0x120E8
+
+#define QIB_6120_IBStatusChangeCnt_OFFS 0x12140
+
+#define QIB_6120_IBLinkErrRecoveryCnt_OFFS 0x12148
+
+#define QIB_6120_IBLinkDownedCnt_OFFS 0x12150
+
+#define QIB_6120_IBSymbolErrCnt_OFFS 0x12158
+
+#define QIB_6120_PcieRetryBufDiagQwordCnt_OFFS 0x12170
+
+#define QIB_6120_RcvEgrArray0_OFFS 0x14000
+
+#define QIB_6120_RcvTIDArray0_OFFS 0x54000
+
+#define QIB_6120_PIOLaunchFIFO_OFFS 0x64000
+
+#define QIB_6120_SendPIOpbcCache_OFFS 0x64800
+
+#define QIB_6120_RcvBuf1_OFFS 0x72000
+
+#define QIB_6120_RcvBuf2_OFFS 0x75000
+
+#define QIB_6120_RcvFlags_OFFS 0x77000
+
+#define QIB_6120_RcvLookupBuf1_OFFS 0x79000
+
+#define QIB_6120_RcvDMABuf_OFFS 0x7B000
+
+#define QIB_6120_MiscRXEIntMem_OFFS 0x7C000
+
+#define QIB_6120_PCIERcvBuf_OFFS 0x80000
+
+#define QIB_6120_PCIERetryBuf_OFFS 0x82000
+
+#define QIB_6120_PCIERcvBufRdToWrAddr_OFFS 0x84000
+
+#define QIB_6120_PIOBuf0_MA_OFFS 0x100000
diff --git a/drivers/infiniband/hw/qib/qib_7220.h b/drivers/infiniband/hw/qib/qib_7220.h
new file mode 100644
index 000000000000..ea0bfd896f92
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7220.h
@@ -0,0 +1,156 @@
+#ifndef _QIB_7220_H
+#define _QIB_7220_H
+/*
+ * Copyright (c) 2007, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Pull in the register definitions auto-generated from the hardware RTL. */
+#include "qib_7220_regs.h"
+
+/* The number of eager receive TIDs for context zero. */
+#define IBA7220_KRCVEGRCNT 2048U
+
+#define IB_7220_LT_STATE_CFGRCVFCFG 0x09
+#define IB_7220_LT_STATE_CFGWAITRMT 0x0a
+#define IB_7220_LT_STATE_TXREVLANES 0x0d
+#define IB_7220_LT_STATE_CFGENH 0x10
+
+struct qib_chip_specific {
+ u64 __iomem *cregbase;
+ u64 *cntrs;
+ u64 *portcntrs;
+ spinlock_t sdepb_lock; /* serdes EPB bus */
+ spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
+ spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
+ u64 hwerrmask;
+ u64 errormask;
+ u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
+ u64 gpio_mask; /* shadow the gpio mask register */
+ u64 extctrl; /* shadow the gpio output enable, etc... */
+ u32 ncntrs;
+ u32 nportcntrs;
+ u32 cntrnamelen;
+ u32 portcntrnamelen;
+ u32 numctxts;
+ u32 rcvegrcnt;
+ u32 autoneg_tries;
+ u32 serdes_first_init_done;
+ u32 sdmabufcnt;
+ u32 lastbuf_for_pio;
+ u32 updthresh; /* current AvailUpdThld */
+ u32 updthresh_dflt; /* default AvailUpdThld */
+ int irq;
+ u8 presets_needed;
+ u8 relock_timer_active;
+ char emsgbuf[128];
+ char sdmamsgbuf[192];
+ char bitsmsgbuf[64];
+ struct timer_list relock_timer;
+ unsigned int relock_interval; /* in jiffies */
+};
+
+struct qib_chippport_specific {
+ struct qib_pportdata pportdata;
+ wait_queue_head_t autoneg_wait;
+ struct delayed_work autoneg_work;
+ struct timer_list chase_timer;
+ /*
+ * These 5 fields are used to establish deltas for IB symbol
+ * errors and link recovery errors. They can be reported on
+ * some chips during link negotiation prior to INIT, and with
+ * DDR when faking DDR negotiations with non-IBTA switches.
+ * The chip counters are adjusted at driver unload if there is
+ * a non-zero delta (see the sketch after this struct).
+ */
+ u64 ibdeltainprog;
+ u64 ibsymdelta;
+ u64 ibsymsnap;
+ u64 iblnkerrdelta;
+ u64 iblnkerrsnap;
+ u64 ibcctrl; /* kr_ibcctrl shadow */
+ u64 ibcddrctrl; /* kr_ibcddrctrl shadow */
+ u64 chase_end;
+ u32 last_delay_mult;
+};
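+
+/*
+ * Illustrative sketch only, with hypothetical naming: one way the delta
+ * bookkeeping fields above can be driven. The current symbol-error count
+ * is passed in rather than read from the chip, so nothing here is tied to
+ * a particular counter register.
+ */
+static inline void qib_7220_symerr_delta_sketch(struct qib_chippport_specific *cp,
+ u64 cur_symbolerr_cnt)
+{
+ if (!cp->ibdeltainprog) {
+ /* start of a window whose symbol errors should be hidden */
+ cp->ibdeltainprog = 1;
+ cp->ibsymsnap = cur_symbolerr_cnt;
+ } else {
+ /* end of the window: remember how much to subtract later */
+ cp->ibsymdelta += cur_symbolerr_cnt - cp->ibsymsnap;
+ cp->ibdeltainprog = 0;
+ }
+}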
+
+/*
+ * This header file provides the declarations and common definitions
+ * mostly for manipulation of the SerDes blocks within the IBA7220.
+ * The functions declared here should only be called from within other
+ * 7220-related files such as qib_iba7220.c or qib_sd7220.c.
+ */
+int qib_sd7220_presets(struct qib_devdata *dd);
+int qib_sd7220_init(struct qib_devdata *dd);
+int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum, u8 *img,
+ int len, int offset);
+int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum, const u8 *img,
+ int len, int offset);
+void qib_sd7220_clr_ibpar(struct qib_devdata *);
+/*
+ * The definition below is used for the sdnum parameter, selecting one of
+ * the two SerDes sections used for PCIe, or the single SerDes used for
+ * IB, which is the only one currently used.
+ */
+#define IB_7220_SERDES 2
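+
+/*
+ * For example (illustrative only; the image pointer, length and return
+ * handling below are placeholders), loading and then verifying the IB
+ * SerDes firmware would pass IB_7220_SERDES as the sdnum argument:
+ *
+ *	ret = qib_sd7220_prog_ld(dd, IB_7220_SERDES, fw_img, fw_len, 0);
+ *	if (ret >= 0)
+ *		ret = qib_sd7220_prog_vfy(dd, IB_7220_SERDES, fw_img, fw_len, 0);
+ */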
+
+int qib_sd7220_ib_load(struct qib_devdata *dd);
+int qib_sd7220_ib_vfy(struct qib_devdata *dd);
+
+static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
+ const u16 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+ return readl((u32 __iomem *)&dd->kregbase[regno]);
+}
+
+static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
+ const u16 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+
+ return readq(&dd->kregbase[regno]);
+}
+
+static inline void qib_write_kreg(const struct qib_devdata *dd,
+ const u16 regno, u64 value)
+{
+ if (dd->kregbase)
+ writeq(value, &dd->kregbase[regno]);
+}
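+
+/*
+ * Illustrative sketch only, with hypothetical naming, assuming register
+ * numbers are byte offsets divided by sizeof(u64) (i.e. kregbase is
+ * indexed as an array of u64): a field is read by shifting the register
+ * value down by the generated _LSB and masking with the matching _RMASK.
+ */
+#define QIB_7220_KREG_IDX_SKETCH(regname) \
+ (QIB_7220_##regname##_OFFS / sizeof(u64))
+
+static inline u32 qib_7220_chiprevmajor_sketch(const struct qib_devdata *dd)
+{
+ u64 val = qib_read_kreg64(dd, QIB_7220_KREG_IDX_SKETCH(Revision));
+
+ /* shift the field down to bit 0, then mask it to its width */
+ return (u32)((val >> QIB_7220_Revision_R_ChipRevMajor_LSB) &
+ QIB_7220_Revision_R_ChipRevMajor_RMASK);
+}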
+
+void set_7220_relock_poll(struct qib_devdata *, int);
+void shutdown_7220_relock_poll(struct qib_devdata *);
+void toggle_7220_rclkrls(struct qib_devdata *);
+
+
+#endif /* _QIB_7220_H */
diff --git a/drivers/infiniband/hw/qib/qib_7220_regs.h b/drivers/infiniband/hw/qib/qib_7220_regs.h
new file mode 100644
index 000000000000..0da5bb750e52
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7220_regs.h
@@ -0,0 +1,1496 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
+
+#define QIB_7220_Revision_OFFS 0x0
+#define QIB_7220_Revision_R_Simulator_LSB 0x3F
+#define QIB_7220_Revision_R_Simulator_RMASK 0x1
+#define QIB_7220_Revision_R_Emulation_LSB 0x3E
+#define QIB_7220_Revision_R_Emulation_RMASK 0x1
+#define QIB_7220_Revision_R_Emulation_Revcode_LSB 0x28
+#define QIB_7220_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
+#define QIB_7220_Revision_BoardID_LSB 0x20
+#define QIB_7220_Revision_BoardID_RMASK 0xFF
+#define QIB_7220_Revision_R_SW_LSB 0x18
+#define QIB_7220_Revision_R_SW_RMASK 0xFF
+#define QIB_7220_Revision_R_Arch_LSB 0x10
+#define QIB_7220_Revision_R_Arch_RMASK 0xFF
+#define QIB_7220_Revision_R_ChipRevMajor_LSB 0x8
+#define QIB_7220_Revision_R_ChipRevMajor_RMASK 0xFF
+#define QIB_7220_Revision_R_ChipRevMinor_LSB 0x0
+#define QIB_7220_Revision_R_ChipRevMinor_RMASK 0xFF
+
+#define QIB_7220_Control_OFFS 0x8
+#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_LSB 0x7
+#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_RMASK 0x1
+#define QIB_7220_Control_PCIECplQDiagEn_LSB 0x6
+#define QIB_7220_Control_PCIECplQDiagEn_RMASK 0x1
+#define QIB_7220_Control_Reserved_LSB 0x5
+#define QIB_7220_Control_Reserved_RMASK 0x1
+#define QIB_7220_Control_TxLatency_LSB 0x4
+#define QIB_7220_Control_TxLatency_RMASK 0x1
+#define QIB_7220_Control_PCIERetryBufDiagEn_LSB 0x3
+#define QIB_7220_Control_PCIERetryBufDiagEn_RMASK 0x1
+#define QIB_7220_Control_LinkEn_LSB 0x2
+#define QIB_7220_Control_LinkEn_RMASK 0x1
+#define QIB_7220_Control_FreezeMode_LSB 0x1
+#define QIB_7220_Control_FreezeMode_RMASK 0x1
+#define QIB_7220_Control_SyncReset_LSB 0x0
+#define QIB_7220_Control_SyncReset_RMASK 0x1
+
+#define QIB_7220_PageAlign_OFFS 0x10
+
+#define QIB_7220_PortCnt_OFFS 0x18
+
+#define QIB_7220_SendRegBase_OFFS 0x30
+
+#define QIB_7220_UserRegBase_OFFS 0x38
+
+#define QIB_7220_CntrRegBase_OFFS 0x40
+
+#define QIB_7220_Scratch_OFFS 0x48
+
+#define QIB_7220_IntMask_OFFS 0x68
+#define QIB_7220_IntMask_SDmaIntMask_LSB 0x3F
+#define QIB_7220_IntMask_SDmaIntMask_RMASK 0x1
+#define QIB_7220_IntMask_SDmaDisabledMasked_LSB 0x3E
+#define QIB_7220_IntMask_SDmaDisabledMasked_RMASK 0x1
+#define QIB_7220_IntMask_Reserved_LSB 0x31
+#define QIB_7220_IntMask_Reserved_RMASK 0x1FFF
+#define QIB_7220_IntMask_RcvUrg16IntMask_LSB 0x30
+#define QIB_7220_IntMask_RcvUrg16IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg15IntMask_LSB 0x2F
+#define QIB_7220_IntMask_RcvUrg15IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg14IntMask_LSB 0x2E
+#define QIB_7220_IntMask_RcvUrg14IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg13IntMask_LSB 0x2D
+#define QIB_7220_IntMask_RcvUrg13IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg12IntMask_LSB 0x2C
+#define QIB_7220_IntMask_RcvUrg12IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg11IntMask_LSB 0x2B
+#define QIB_7220_IntMask_RcvUrg11IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg10IntMask_LSB 0x2A
+#define QIB_7220_IntMask_RcvUrg10IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg9IntMask_LSB 0x29
+#define QIB_7220_IntMask_RcvUrg9IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg8IntMask_LSB 0x28
+#define QIB_7220_IntMask_RcvUrg8IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg7IntMask_LSB 0x27
+#define QIB_7220_IntMask_RcvUrg7IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg6IntMask_LSB 0x26
+#define QIB_7220_IntMask_RcvUrg6IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg5IntMask_LSB 0x25
+#define QIB_7220_IntMask_RcvUrg5IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg4IntMask_LSB 0x24
+#define QIB_7220_IntMask_RcvUrg4IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg3IntMask_LSB 0x23
+#define QIB_7220_IntMask_RcvUrg3IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg2IntMask_LSB 0x22
+#define QIB_7220_IntMask_RcvUrg2IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg1IntMask_LSB 0x21
+#define QIB_7220_IntMask_RcvUrg1IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg0IntMask_LSB 0x20
+#define QIB_7220_IntMask_RcvUrg0IntMask_RMASK 0x1
+#define QIB_7220_IntMask_ErrorIntMask_LSB 0x1F
+#define QIB_7220_IntMask_ErrorIntMask_RMASK 0x1
+#define QIB_7220_IntMask_PioSetIntMask_LSB 0x1E
+#define QIB_7220_IntMask_PioSetIntMask_RMASK 0x1
+#define QIB_7220_IntMask_PioBufAvailIntMask_LSB 0x1D
+#define QIB_7220_IntMask_PioBufAvailIntMask_RMASK 0x1
+#define QIB_7220_IntMask_assertGPIOIntMask_LSB 0x1C
+#define QIB_7220_IntMask_assertGPIOIntMask_RMASK 0x1
+#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_LSB 0x1B
+#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_RMASK 0x1
+#define QIB_7220_IntMask_JIntMask_LSB 0x1A
+#define QIB_7220_IntMask_JIntMask_RMASK 0x1
+#define QIB_7220_IntMask_Reserved1_LSB 0x11
+#define QIB_7220_IntMask_Reserved1_RMASK 0x1FF
+#define QIB_7220_IntMask_RcvAvail16IntMask_LSB 0x10
+#define QIB_7220_IntMask_RcvAvail16IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail15IntMask_LSB 0xF
+#define QIB_7220_IntMask_RcvAvail15IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail14IntMask_LSB 0xE
+#define QIB_7220_IntMask_RcvAvail14IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail13IntMask_LSB 0xD
+#define QIB_7220_IntMask_RcvAvail13IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail12IntMask_LSB 0xC
+#define QIB_7220_IntMask_RcvAvail12IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail11IntMask_LSB 0xB
+#define QIB_7220_IntMask_RcvAvail11IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail10IntMask_LSB 0xA
+#define QIB_7220_IntMask_RcvAvail10IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail9IntMask_LSB 0x9
+#define QIB_7220_IntMask_RcvAvail9IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail8IntMask_LSB 0x8
+#define QIB_7220_IntMask_RcvAvail8IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail7IntMask_LSB 0x7
+#define QIB_7220_IntMask_RcvAvail7IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail6IntMask_LSB 0x6
+#define QIB_7220_IntMask_RcvAvail6IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail5IntMask_LSB 0x5
+#define QIB_7220_IntMask_RcvAvail5IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail4IntMask_LSB 0x4
+#define QIB_7220_IntMask_RcvAvail4IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail3IntMask_LSB 0x3
+#define QIB_7220_IntMask_RcvAvail3IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail2IntMask_LSB 0x2
+#define QIB_7220_IntMask_RcvAvail2IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail1IntMask_LSB 0x1
+#define QIB_7220_IntMask_RcvAvail1IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail0IntMask_LSB 0x0
+#define QIB_7220_IntMask_RcvAvail0IntMask_RMASK 0x1
+
+#define QIB_7220_IntStatus_OFFS 0x70
+#define QIB_7220_IntStatus_SDmaInt_LSB 0x3F
+#define QIB_7220_IntStatus_SDmaInt_RMASK 0x1
+#define QIB_7220_IntStatus_SDmaDisabled_LSB 0x3E
+#define QIB_7220_IntStatus_SDmaDisabled_RMASK 0x1
+#define QIB_7220_IntStatus_Reserved_LSB 0x31
+#define QIB_7220_IntStatus_Reserved_RMASK 0x1FFF
+#define QIB_7220_IntStatus_RcvUrg16_LSB 0x30
+#define QIB_7220_IntStatus_RcvUrg16_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg15_LSB 0x2F
+#define QIB_7220_IntStatus_RcvUrg15_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg14_LSB 0x2E
+#define QIB_7220_IntStatus_RcvUrg14_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg13_LSB 0x2D
+#define QIB_7220_IntStatus_RcvUrg13_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg12_LSB 0x2C
+#define QIB_7220_IntStatus_RcvUrg12_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg11_LSB 0x2B
+#define QIB_7220_IntStatus_RcvUrg11_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg10_LSB 0x2A
+#define QIB_7220_IntStatus_RcvUrg10_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg9_LSB 0x29
+#define QIB_7220_IntStatus_RcvUrg9_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg8_LSB 0x28
+#define QIB_7220_IntStatus_RcvUrg8_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg7_LSB 0x27
+#define QIB_7220_IntStatus_RcvUrg7_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg6_LSB 0x26
+#define QIB_7220_IntStatus_RcvUrg6_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg5_LSB 0x25
+#define QIB_7220_IntStatus_RcvUrg5_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg4_LSB 0x24
+#define QIB_7220_IntStatus_RcvUrg4_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg3_LSB 0x23
+#define QIB_7220_IntStatus_RcvUrg3_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg2_LSB 0x22
+#define QIB_7220_IntStatus_RcvUrg2_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg1_LSB 0x21
+#define QIB_7220_IntStatus_RcvUrg1_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg0_LSB 0x20
+#define QIB_7220_IntStatus_RcvUrg0_RMASK 0x1
+#define QIB_7220_IntStatus_Error_LSB 0x1F
+#define QIB_7220_IntStatus_Error_RMASK 0x1
+#define QIB_7220_IntStatus_PioSent_LSB 0x1E
+#define QIB_7220_IntStatus_PioSent_RMASK 0x1
+#define QIB_7220_IntStatus_PioBufAvail_LSB 0x1D
+#define QIB_7220_IntStatus_PioBufAvail_RMASK 0x1
+#define QIB_7220_IntStatus_assertGPIO_LSB 0x1C
+#define QIB_7220_IntStatus_assertGPIO_RMASK 0x1
+#define QIB_7220_IntStatus_IBSerdesTrimDone_LSB 0x1B
+#define QIB_7220_IntStatus_IBSerdesTrimDone_RMASK 0x1
+#define QIB_7220_IntStatus_JInt_LSB 0x1A
+#define QIB_7220_IntStatus_JInt_RMASK 0x1
+#define QIB_7220_IntStatus_Reserved1_LSB 0x11
+#define QIB_7220_IntStatus_Reserved1_RMASK 0x1FF
+#define QIB_7220_IntStatus_RcvAvail16_LSB 0x10
+#define QIB_7220_IntStatus_RcvAvail16_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail15_LSB 0xF
+#define QIB_7220_IntStatus_RcvAvail15_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail14_LSB 0xE
+#define QIB_7220_IntStatus_RcvAvail14_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail13_LSB 0xD
+#define QIB_7220_IntStatus_RcvAvail13_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail12_LSB 0xC
+#define QIB_7220_IntStatus_RcvAvail12_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail11_LSB 0xB
+#define QIB_7220_IntStatus_RcvAvail11_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail10_LSB 0xA
+#define QIB_7220_IntStatus_RcvAvail10_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail9_LSB 0x9
+#define QIB_7220_IntStatus_RcvAvail9_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail8_LSB 0x8
+#define QIB_7220_IntStatus_RcvAvail8_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail7_LSB 0x7
+#define QIB_7220_IntStatus_RcvAvail7_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail6_LSB 0x6
+#define QIB_7220_IntStatus_RcvAvail6_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail5_LSB 0x5
+#define QIB_7220_IntStatus_RcvAvail5_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail4_LSB 0x4
+#define QIB_7220_IntStatus_RcvAvail4_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail3_LSB 0x3
+#define QIB_7220_IntStatus_RcvAvail3_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail2_LSB 0x2
+#define QIB_7220_IntStatus_RcvAvail2_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail1_LSB 0x1
+#define QIB_7220_IntStatus_RcvAvail1_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail0_LSB 0x0
+#define QIB_7220_IntStatus_RcvAvail0_RMASK 0x1
+
+#define QIB_7220_IntClear_OFFS 0x78
+#define QIB_7220_IntClear_SDmaIntClear_LSB 0x3F
+#define QIB_7220_IntClear_SDmaIntClear_RMASK 0x1
+#define QIB_7220_IntClear_SDmaDisabledClear_LSB 0x3E
+#define QIB_7220_IntClear_SDmaDisabledClear_RMASK 0x1
+#define QIB_7220_IntClear_Reserved_LSB 0x31
+#define QIB_7220_IntClear_Reserved_RMASK 0x1FFF
+#define QIB_7220_IntClear_RcvUrg16IntClear_LSB 0x30
+#define QIB_7220_IntClear_RcvUrg16IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg15IntClear_LSB 0x2F
+#define QIB_7220_IntClear_RcvUrg15IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg14IntClear_LSB 0x2E
+#define QIB_7220_IntClear_RcvUrg14IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg13IntClear_LSB 0x2D
+#define QIB_7220_IntClear_RcvUrg13IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg12IntClear_LSB 0x2C
+#define QIB_7220_IntClear_RcvUrg12IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg11IntClear_LSB 0x2B
+#define QIB_7220_IntClear_RcvUrg11IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg10IntClear_LSB 0x2A
+#define QIB_7220_IntClear_RcvUrg10IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg9IntClear_LSB 0x29
+#define QIB_7220_IntClear_RcvUrg9IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg8IntClear_LSB 0x28
+#define QIB_7220_IntClear_RcvUrg8IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg7IntClear_LSB 0x27
+#define QIB_7220_IntClear_RcvUrg7IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg6IntClear_LSB 0x26
+#define QIB_7220_IntClear_RcvUrg6IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg5IntClear_LSB 0x25
+#define QIB_7220_IntClear_RcvUrg5IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg4IntClear_LSB 0x24
+#define QIB_7220_IntClear_RcvUrg4IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg3IntClear_LSB 0x23
+#define QIB_7220_IntClear_RcvUrg3IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg2IntClear_LSB 0x22
+#define QIB_7220_IntClear_RcvUrg2IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg1IntClear_LSB 0x21
+#define QIB_7220_IntClear_RcvUrg1IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg0IntClear_LSB 0x20
+#define QIB_7220_IntClear_RcvUrg0IntClear_RMASK 0x1
+#define QIB_7220_IntClear_ErrorIntClear_LSB 0x1F
+#define QIB_7220_IntClear_ErrorIntClear_RMASK 0x1
+#define QIB_7220_IntClear_PioSetIntClear_LSB 0x1E
+#define QIB_7220_IntClear_PioSetIntClear_RMASK 0x1
+#define QIB_7220_IntClear_PioBufAvailIntClear_LSB 0x1D
+#define QIB_7220_IntClear_PioBufAvailIntClear_RMASK 0x1
+#define QIB_7220_IntClear_assertGPIOIntClear_LSB 0x1C
+#define QIB_7220_IntClear_assertGPIOIntClear_RMASK 0x1
+#define QIB_7220_IntClear_IBSerdesTrimDoneClear_LSB 0x1B
+#define QIB_7220_IntClear_IBSerdesTrimDoneClear_RMASK 0x1
+#define QIB_7220_IntClear_JIntClear_LSB 0x1A
+#define QIB_7220_IntClear_JIntClear_RMASK 0x1
+#define QIB_7220_IntClear_Reserved1_LSB 0x11
+#define QIB_7220_IntClear_Reserved1_RMASK 0x1FF
+#define QIB_7220_IntClear_RcvAvail16IntClear_LSB 0x10
+#define QIB_7220_IntClear_RcvAvail16IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail15IntClear_LSB 0xF
+#define QIB_7220_IntClear_RcvAvail15IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail14IntClear_LSB 0xE
+#define QIB_7220_IntClear_RcvAvail14IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail13IntClear_LSB 0xD
+#define QIB_7220_IntClear_RcvAvail13IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail12IntClear_LSB 0xC
+#define QIB_7220_IntClear_RcvAvail12IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail11IntClear_LSB 0xB
+#define QIB_7220_IntClear_RcvAvail11IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail10IntClear_LSB 0xA
+#define QIB_7220_IntClear_RcvAvail10IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail9IntClear_LSB 0x9
+#define QIB_7220_IntClear_RcvAvail9IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail8IntClear_LSB 0x8
+#define QIB_7220_IntClear_RcvAvail8IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail7IntClear_LSB 0x7
+#define QIB_7220_IntClear_RcvAvail7IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail6IntClear_LSB 0x6
+#define QIB_7220_IntClear_RcvAvail6IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail5IntClear_LSB 0x5
+#define QIB_7220_IntClear_RcvAvail5IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail4IntClear_LSB 0x4
+#define QIB_7220_IntClear_RcvAvail4IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail3IntClear_LSB 0x3
+#define QIB_7220_IntClear_RcvAvail3IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail2IntClear_LSB 0x2
+#define QIB_7220_IntClear_RcvAvail2IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail1IntClear_LSB 0x1
+#define QIB_7220_IntClear_RcvAvail1IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail0IntClear_LSB 0x0
+#define QIB_7220_IntClear_RcvAvail0IntClear_RMASK 0x1
+
+#define QIB_7220_ErrMask_OFFS 0x80
+#define QIB_7220_ErrMask_Reserved_LSB 0x36
+#define QIB_7220_ErrMask_Reserved_RMASK 0x3FF
+#define QIB_7220_ErrMask_InvalidEEPCmdMask_LSB 0x35
+#define QIB_7220_ErrMask_InvalidEEPCmdMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_LSB 0x34
+#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_HardwareErrMask_LSB 0x33
+#define QIB_7220_ErrMask_HardwareErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_ResetNegatedMask_LSB 0x32
+#define QIB_7220_ErrMask_ResetNegatedMask_RMASK 0x1
+#define QIB_7220_ErrMask_InvalidAddrErrMask_LSB 0x31
+#define QIB_7220_ErrMask_InvalidAddrErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_IBStatusChangedMask_LSB 0x30
+#define QIB_7220_ErrMask_IBStatusChangedMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_LSB 0x2F
+#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaMissingDwErrMask_LSB 0x2E
+#define QIB_7220_ErrMask_SDmaMissingDwErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaDwEnErrMask_LSB 0x2D
+#define QIB_7220_ErrMask_SDmaDwEnErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaRpyTagErrMask_LSB 0x2C
+#define QIB_7220_ErrMask_SDmaRpyTagErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDma1stDescErrMask_LSB 0x2B
+#define QIB_7220_ErrMask_SDma1stDescErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaBaseErrMask_LSB 0x2A
+#define QIB_7220_ErrMask_SDmaBaseErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_LSB 0x29
+#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_LSB 0x28
+#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_LSB 0x27
+#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendBufMisuseErrMask_LSB 0x26
+#define QIB_7220_ErrMask_SendBufMisuseErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
+#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
+#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
+#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
+#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
+#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendPktLenErrMask_LSB 0x20
+#define QIB_7220_ErrMask_SendPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendUnderRunErrMask_LSB 0x1F
+#define QIB_7220_ErrMask_SendUnderRunErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
+#define QIB_7220_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendMinPktLenErrMask_LSB 0x1D
+#define QIB_7220_ErrMask_SendMinPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaDisabledErrMask_LSB 0x1C
+#define QIB_7220_ErrMask_SDmaDisabledErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
+#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_Reserved1_LSB 0x12
+#define QIB_7220_ErrMask_Reserved1_RMASK 0x1FF
+#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
+#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvHdrErrMask_LSB 0x10
+#define QIB_7220_ErrMask_RcvHdrErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvHdrLenErrMask_LSB 0xF
+#define QIB_7220_ErrMask_RcvHdrLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvBadTidErrMask_LSB 0xE
+#define QIB_7220_ErrMask_RcvBadTidErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvHdrFullErrMask_LSB 0xD
+#define QIB_7220_ErrMask_RcvHdrFullErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvEgrFullErrMask_LSB 0xC
+#define QIB_7220_ErrMask_RcvEgrFullErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvBadVersionErrMask_LSB 0xB
+#define QIB_7220_ErrMask_RcvBadVersionErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvIBFlowErrMask_LSB 0xA
+#define QIB_7220_ErrMask_RcvIBFlowErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvEBPErrMask_LSB 0x9
+#define QIB_7220_ErrMask_RcvEBPErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
+#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
+#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvShortPktLenErrMask_LSB 0x6
+#define QIB_7220_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvLongPktLenErrMask_LSB 0x5
+#define QIB_7220_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
+#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvMinPktLenErrMask_LSB 0x3
+#define QIB_7220_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvICRCErrMask_LSB 0x2
+#define QIB_7220_ErrMask_RcvICRCErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvVCRCErrMask_LSB 0x1
+#define QIB_7220_ErrMask_RcvVCRCErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvFormatErrMask_LSB 0x0
+#define QIB_7220_ErrMask_RcvFormatErrMask_RMASK 0x1
+
+#define QIB_7220_ErrStatus_OFFS 0x88
+#define QIB_7220_ErrStatus_Reserved_LSB 0x36
+#define QIB_7220_ErrStatus_Reserved_RMASK 0x3FF
+#define QIB_7220_ErrStatus_InvalidEEPCmdErr_LSB 0x35
+#define QIB_7220_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_LSB 0x34
+#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_RMASK 0x1
+#define QIB_7220_ErrStatus_HardwareErr_LSB 0x33
+#define QIB_7220_ErrStatus_HardwareErr_RMASK 0x1
+#define QIB_7220_ErrStatus_ResetNegated_LSB 0x32
+#define QIB_7220_ErrStatus_ResetNegated_RMASK 0x1
+#define QIB_7220_ErrStatus_InvalidAddrErr_LSB 0x31
+#define QIB_7220_ErrStatus_InvalidAddrErr_RMASK 0x1
+#define QIB_7220_ErrStatus_IBStatusChanged_LSB 0x30
+#define QIB_7220_ErrStatus_IBStatusChanged_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaUnexpDataErr_LSB 0x2F
+#define QIB_7220_ErrStatus_SDmaUnexpDataErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaMissingDwErr_LSB 0x2E
+#define QIB_7220_ErrStatus_SDmaMissingDwErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaDwEnErr_LSB 0x2D
+#define QIB_7220_ErrStatus_SDmaDwEnErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaRpyTagErr_LSB 0x2C
+#define QIB_7220_ErrStatus_SDmaRpyTagErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDma1stDescErr_LSB 0x2B
+#define QIB_7220_ErrStatus_SDma1stDescErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaBaseErr_LSB 0x2A
+#define QIB_7220_ErrStatus_SDmaBaseErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_LSB 0x29
+#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_LSB 0x28
+#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaGenMismatchErr_LSB 0x27
+#define QIB_7220_ErrStatus_SDmaGenMismatchErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendBufMisuseErr_LSB 0x26
+#define QIB_7220_ErrStatus_SendBufMisuseErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendUnsupportedVLErr_LSB 0x25
+#define QIB_7220_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
+#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendPioArmLaunchErr_LSB 0x23
+#define QIB_7220_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendDroppedDataPktErr_LSB 0x22
+#define QIB_7220_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
+#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendPktLenErr_LSB 0x20
+#define QIB_7220_ErrStatus_SendPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendUnderRunErr_LSB 0x1F
+#define QIB_7220_ErrStatus_SendUnderRunErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendMaxPktLenErr_LSB 0x1E
+#define QIB_7220_ErrStatus_SendMaxPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendMinPktLenErr_LSB 0x1D
+#define QIB_7220_ErrStatus_SendMinPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaDisabledErr_LSB 0x1C
+#define QIB_7220_ErrStatus_SDmaDisabledErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
+#define QIB_7220_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
+#define QIB_7220_ErrStatus_Reserved1_LSB 0x12
+#define QIB_7220_ErrStatus_Reserved1_RMASK 0x1FF
+#define QIB_7220_ErrStatus_RcvIBLostLinkErr_LSB 0x11
+#define QIB_7220_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvHdrErr_LSB 0x10
+#define QIB_7220_ErrStatus_RcvHdrErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvHdrLenErr_LSB 0xF
+#define QIB_7220_ErrStatus_RcvHdrLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvBadTidErr_LSB 0xE
+#define QIB_7220_ErrStatus_RcvBadTidErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvHdrFullErr_LSB 0xD
+#define QIB_7220_ErrStatus_RcvHdrFullErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvEgrFullErr_LSB 0xC
+#define QIB_7220_ErrStatus_RcvEgrFullErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvBadVersionErr_LSB 0xB
+#define QIB_7220_ErrStatus_RcvBadVersionErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvIBFlowErr_LSB 0xA
+#define QIB_7220_ErrStatus_RcvIBFlowErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvEBPErr_LSB 0x9
+#define QIB_7220_ErrStatus_RcvEBPErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
+#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
+#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvShortPktLenErr_LSB 0x6
+#define QIB_7220_ErrStatus_RcvShortPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvLongPktLenErr_LSB 0x5
+#define QIB_7220_ErrStatus_RcvLongPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvMaxPktLenErr_LSB 0x4
+#define QIB_7220_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvMinPktLenErr_LSB 0x3
+#define QIB_7220_ErrStatus_RcvMinPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvICRCErr_LSB 0x2
+#define QIB_7220_ErrStatus_RcvICRCErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvVCRCErr_LSB 0x1
+#define QIB_7220_ErrStatus_RcvVCRCErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvFormatErr_LSB 0x0
+#define QIB_7220_ErrStatus_RcvFormatErr_RMASK 0x1
+
+#define QIB_7220_ErrClear_OFFS 0x90
+#define QIB_7220_ErrClear_Reserved_LSB 0x36
+#define QIB_7220_ErrClear_Reserved_RMASK 0x3FF
+#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
+#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_LSB 0x34
+#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_HardwareErrClear_LSB 0x33
+#define QIB_7220_ErrClear_HardwareErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_ResetNegatedClear_LSB 0x32
+#define QIB_7220_ErrClear_ResetNegatedClear_RMASK 0x1
+#define QIB_7220_ErrClear_InvalidAddrErrClear_LSB 0x31
+#define QIB_7220_ErrClear_InvalidAddrErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_IBStatusChangedClear_LSB 0x30
+#define QIB_7220_ErrClear_IBStatusChangedClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_LSB 0x2F
+#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaMissingDwErrClear_LSB 0x2E
+#define QIB_7220_ErrClear_SDmaMissingDwErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaDwEnErrClear_LSB 0x2D
+#define QIB_7220_ErrClear_SDmaDwEnErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaRpyTagErrClear_LSB 0x2C
+#define QIB_7220_ErrClear_SDmaRpyTagErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDma1stDescErrClear_LSB 0x2B
+#define QIB_7220_ErrClear_SDma1stDescErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaBaseErrClear_LSB 0x2A
+#define QIB_7220_ErrClear_SDmaBaseErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_LSB 0x29
+#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_LSB 0x28
+#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_LSB 0x27
+#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendBufMisuseErrClear_LSB 0x26
+#define QIB_7220_ErrClear_SendBufMisuseErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
+#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
+#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
+#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
+#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
+#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendPktLenErrClear_LSB 0x20
+#define QIB_7220_ErrClear_SendPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendUnderRunErrClear_LSB 0x1F
+#define QIB_7220_ErrClear_SendUnderRunErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
+#define QIB_7220_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendMinPktLenErrClear_LSB 0x1D
+#define QIB_7220_ErrClear_SendMinPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaDisabledErrClear_LSB 0x1C
+#define QIB_7220_ErrClear_SDmaDisabledErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
+#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_Reserved1_LSB 0x12
+#define QIB_7220_ErrClear_Reserved1_RMASK 0x1FF
+#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
+#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvHdrErrClear_LSB 0x10
+#define QIB_7220_ErrClear_RcvHdrErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvHdrLenErrClear_LSB 0xF
+#define QIB_7220_ErrClear_RcvHdrLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvBadTidErrClear_LSB 0xE
+#define QIB_7220_ErrClear_RcvBadTidErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvHdrFullErrClear_LSB 0xD
+#define QIB_7220_ErrClear_RcvHdrFullErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvEgrFullErrClear_LSB 0xC
+#define QIB_7220_ErrClear_RcvEgrFullErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvBadVersionErrClear_LSB 0xB
+#define QIB_7220_ErrClear_RcvBadVersionErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvIBFlowErrClear_LSB 0xA
+#define QIB_7220_ErrClear_RcvIBFlowErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvEBPErrClear_LSB 0x9
+#define QIB_7220_ErrClear_RcvEBPErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
+#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
+#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvShortPktLenErrClear_LSB 0x6
+#define QIB_7220_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvLongPktLenErrClear_LSB 0x5
+#define QIB_7220_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
+#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvMinPktLenErrClear_LSB 0x3
+#define QIB_7220_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvICRCErrClear_LSB 0x2
+#define QIB_7220_ErrClear_RcvICRCErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvVCRCErrClear_LSB 0x1
+#define QIB_7220_ErrClear_RcvVCRCErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvFormatErrClear_LSB 0x0
+#define QIB_7220_ErrClear_RcvFormatErrClear_RMASK 0x1
+
+#define QIB_7220_HwErrMask_OFFS 0x98
+#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
+#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
+#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_LSB 0x3D
+#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_RMASK 0x1
+#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
+#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_LSB 0x3B
+#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_LSB 0x3A
+#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x39
+#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x38
+#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Reserved_LSB 0x37
+#define QIB_7220_HwErrMask_Reserved_RMASK 0x1
+#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
+#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Reserved1_LSB 0x33
+#define QIB_7220_HwErrMask_Reserved1_RMASK 0x7
+#define QIB_7220_HwErrMask_RXEMemParityErrMask_LSB 0x2C
+#define QIB_7220_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
+#define QIB_7220_HwErrMask_TXEMemParityErrMask_LSB 0x28
+#define QIB_7220_HwErrMask_TXEMemParityErrMask_RMASK 0xF
+#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_LSB 0x27
+#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_LSB 0x26
+#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_LSB 0x25
+#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_LSB 0x24
+#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Reserved2_LSB 0x22
+#define QIB_7220_HwErrMask_Reserved2_RMASK 0x3
+#define QIB_7220_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
+#define QIB_7220_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
+#define QIB_7220_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
+#define QIB_7220_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PoisonedTLPMask_LSB 0x1D
+#define QIB_7220_HwErrMask_PoisonedTLPMask_RMASK 0x1
+#define QIB_7220_HwErrMask_SDmaMemReadErrMask_LSB 0x1C
+#define QIB_7220_HwErrMask_SDmaMemReadErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Reserved3_LSB 0x8
+#define QIB_7220_HwErrMask_Reserved3_RMASK 0xFFFFF
+#define QIB_7220_HwErrMask_PCIeMemParityErrMask_LSB 0x0
+#define QIB_7220_HwErrMask_PCIeMemParityErrMask_RMASK 0xFF
+
+#define QIB_7220_HwErrStatus_OFFS 0xA0
+#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
+#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
+#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_LSB 0x3D
+#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_RMASK 0x1
+#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
+#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_LSB 0x3B
+#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_LSB 0x3A
+#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x39
+#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x38
+#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_Reserved_LSB 0x37
+#define QIB_7220_HwErrStatus_Reserved_RMASK 0x1
+#define QIB_7220_HwErrStatus_PowerOnBISTFailed_LSB 0x36
+#define QIB_7220_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
+#define QIB_7220_HwErrStatus_Reserved1_LSB 0x33
+#define QIB_7220_HwErrStatus_Reserved1_RMASK 0x7
+#define QIB_7220_HwErrStatus_RXEMemParity_LSB 0x2C
+#define QIB_7220_HwErrStatus_RXEMemParity_RMASK 0x7F
+#define QIB_7220_HwErrStatus_TXEMemParity_LSB 0x28
+#define QIB_7220_HwErrStatus_TXEMemParity_RMASK 0xF
+#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_LSB 0x27
+#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_LSB 0x26
+#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_LSB 0x25
+#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_LSB 0x24
+#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_Reserved2_LSB 0x22
+#define QIB_7220_HwErrStatus_Reserved2_RMASK 0x3
+#define QIB_7220_HwErrStatus_PCIeBusParity_LSB 0x1F
+#define QIB_7220_HwErrStatus_PCIeBusParity_RMASK 0x7
+#define QIB_7220_HwErrStatus_PcieCplTimeout_LSB 0x1E
+#define QIB_7220_HwErrStatus_PcieCplTimeout_RMASK 0x1
+#define QIB_7220_HwErrStatus_PoisenedTLP_LSB 0x1D
+#define QIB_7220_HwErrStatus_PoisenedTLP_RMASK 0x1
+#define QIB_7220_HwErrStatus_SDmaMemReadErr_LSB 0x1C
+#define QIB_7220_HwErrStatus_SDmaMemReadErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_Reserved3_LSB 0x8
+#define QIB_7220_HwErrStatus_Reserved3_RMASK 0xFFFFF
+#define QIB_7220_HwErrStatus_PCIeMemParity_LSB 0x0
+#define QIB_7220_HwErrStatus_PCIeMemParity_RMASK 0xFF
+
+#define QIB_7220_HwErrClear_OFFS 0xA8
+#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
+#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
+#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_LSB 0x3D
+#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_RMASK 0x1
+#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
+#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_LSB 0x3B
+#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_LSB 0x3A
+#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x39
+#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x38
+#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Reserved_LSB 0x37
+#define QIB_7220_HwErrClear_Reserved_RMASK 0x1
+#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
+#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Reserved1_LSB 0x33
+#define QIB_7220_HwErrClear_Reserved1_RMASK 0x7
+#define QIB_7220_HwErrClear_RXEMemParityClear_LSB 0x2C
+#define QIB_7220_HwErrClear_RXEMemParityClear_RMASK 0x7F
+#define QIB_7220_HwErrClear_TXEMemParityClear_LSB 0x28
+#define QIB_7220_HwErrClear_TXEMemParityClear_RMASK 0xF
+#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_LSB 0x27
+#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_LSB 0x26
+#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_LSB 0x25
+#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_LSB 0x24
+#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Reserved2_LSB 0x22
+#define QIB_7220_HwErrClear_Reserved2_RMASK 0x3
+#define QIB_7220_HwErrClear_PCIeBusParityClr_LSB 0x1F
+#define QIB_7220_HwErrClear_PCIeBusParityClr_RMASK 0x7
+#define QIB_7220_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
+#define QIB_7220_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PoisonedTLPClear_LSB 0x1D
+#define QIB_7220_HwErrClear_PoisonedTLPClear_RMASK 0x1
+#define QIB_7220_HwErrClear_SDmaMemReadErrClear_LSB 0x1C
+#define QIB_7220_HwErrClear_SDmaMemReadErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Reserved3_LSB 0x8
+#define QIB_7220_HwErrClear_Reserved3_RMASK 0xFFFFF
+#define QIB_7220_HwErrClear_PCIeMemParityClr_LSB 0x0
+#define QIB_7220_HwErrClear_PCIeMemParityClr_RMASK 0xFF
+
+#define QIB_7220_HwDiagCtrl_OFFS 0xB0
+#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
+#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
+#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_CounterWrEnable_LSB 0x3D
+#define QIB_7220_HwDiagCtrl_CounterWrEnable_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_CounterDisable_LSB 0x3C
+#define QIB_7220_HwDiagCtrl_CounterDisable_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_Reserved_LSB 0x33
+#define QIB_7220_HwDiagCtrl_Reserved_RMASK 0x1FF
+#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
+#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
+#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
+#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
+#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_LSB 0x27
+#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_LSB 0x26
+#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_LSB 0x25
+#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_LSB 0x24
+#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_Reserved1_LSB 0x23
+#define QIB_7220_HwDiagCtrl_Reserved1_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
+#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
+#define QIB_7220_HwDiagCtrl_Reserved2_LSB 0x8
+#define QIB_7220_HwDiagCtrl_Reserved2_RMASK 0x7FFFFF
+#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
+#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_RMASK 0xFF
+
+#define QIB_7220_REG_0000B8_OFFS 0xB8
+
+#define QIB_7220_IBCStatus_OFFS 0xC0
+#define QIB_7220_IBCStatus_TxCreditOk_LSB 0x1F
+#define QIB_7220_IBCStatus_TxCreditOk_RMASK 0x1
+#define QIB_7220_IBCStatus_TxReady_LSB 0x1E
+#define QIB_7220_IBCStatus_TxReady_RMASK 0x1
+#define QIB_7220_IBCStatus_Reserved_LSB 0xE
+#define QIB_7220_IBCStatus_Reserved_RMASK 0xFFFF
+#define QIB_7220_IBCStatus_IBTxLaneReversed_LSB 0xD
+#define QIB_7220_IBCStatus_IBTxLaneReversed_RMASK 0x1
+#define QIB_7220_IBCStatus_IBRxLaneReversed_LSB 0xC
+#define QIB_7220_IBCStatus_IBRxLaneReversed_RMASK 0x1
+#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_LSB 0xB
+#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_RMASK 0x1
+#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_LSB 0xA
+#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_RMASK 0x1
+#define QIB_7220_IBCStatus_LinkWidthActive_LSB 0x9
+#define QIB_7220_IBCStatus_LinkWidthActive_RMASK 0x1
+#define QIB_7220_IBCStatus_LinkSpeedActive_LSB 0x8
+#define QIB_7220_IBCStatus_LinkSpeedActive_RMASK 0x1
+#define QIB_7220_IBCStatus_LinkState_LSB 0x5
+#define QIB_7220_IBCStatus_LinkState_RMASK 0x7
+#define QIB_7220_IBCStatus_LinkTrainingState_LSB 0x0
+#define QIB_7220_IBCStatus_LinkTrainingState_RMASK 0x1F
+
+#define QIB_7220_IBCCtrl_OFFS 0xC8
+#define QIB_7220_IBCCtrl_Loopback_LSB 0x3F
+#define QIB_7220_IBCCtrl_Loopback_RMASK 0x1
+#define QIB_7220_IBCCtrl_LinkDownDefaultState_LSB 0x3E
+#define QIB_7220_IBCCtrl_LinkDownDefaultState_RMASK 0x1
+#define QIB_7220_IBCCtrl_Reserved_LSB 0x2B
+#define QIB_7220_IBCCtrl_Reserved_RMASK 0x7FFFF
+#define QIB_7220_IBCCtrl_CreditScale_LSB 0x28
+#define QIB_7220_IBCCtrl_CreditScale_RMASK 0x7
+#define QIB_7220_IBCCtrl_OverrunThreshold_LSB 0x24
+#define QIB_7220_IBCCtrl_OverrunThreshold_RMASK 0xF
+#define QIB_7220_IBCCtrl_PhyerrThreshold_LSB 0x20
+#define QIB_7220_IBCCtrl_PhyerrThreshold_RMASK 0xF
+#define QIB_7220_IBCCtrl_MaxPktLen_LSB 0x15
+#define QIB_7220_IBCCtrl_MaxPktLen_RMASK 0x7FF
+#define QIB_7220_IBCCtrl_LinkCmd_LSB 0x13
+#define QIB_7220_IBCCtrl_LinkCmd_RMASK 0x3
+#define QIB_7220_IBCCtrl_LinkInitCmd_LSB 0x10
+#define QIB_7220_IBCCtrl_LinkInitCmd_RMASK 0x7
+#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
+#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
+#define QIB_7220_IBCCtrl_FlowCtrlPeriod_LSB 0x0
+#define QIB_7220_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
+
+#define QIB_7220_EXTStatus_OFFS 0xD0
+#define QIB_7220_EXTStatus_GPIOIn_LSB 0x30
+#define QIB_7220_EXTStatus_GPIOIn_RMASK 0xFFFF
+#define QIB_7220_EXTStatus_Reserved_LSB 0x20
+#define QIB_7220_EXTStatus_Reserved_RMASK 0xFFFF
+#define QIB_7220_EXTStatus_Reserved1_LSB 0x10
+#define QIB_7220_EXTStatus_Reserved1_RMASK 0xFFFF
+#define QIB_7220_EXTStatus_MemBISTDisabled_LSB 0xF
+#define QIB_7220_EXTStatus_MemBISTDisabled_RMASK 0x1
+#define QIB_7220_EXTStatus_MemBISTEndTest_LSB 0xE
+#define QIB_7220_EXTStatus_MemBISTEndTest_RMASK 0x1
+#define QIB_7220_EXTStatus_Reserved2_LSB 0x0
+#define QIB_7220_EXTStatus_Reserved2_RMASK 0x3FFF
+
+#define QIB_7220_EXTCtrl_OFFS 0xD8
+#define QIB_7220_EXTCtrl_GPIOOe_LSB 0x30
+#define QIB_7220_EXTCtrl_GPIOOe_RMASK 0xFFFF
+#define QIB_7220_EXTCtrl_GPIOInvert_LSB 0x20
+#define QIB_7220_EXTCtrl_GPIOInvert_RMASK 0xFFFF
+#define QIB_7220_EXTCtrl_Reserved_LSB 0x4
+#define QIB_7220_EXTCtrl_Reserved_RMASK 0xFFFFFFF
+#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
+#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
+#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
+#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
+#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
+#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
+#define QIB_7220_EXTCtrl_LEDGblErrRedOff_LSB 0x0
+#define QIB_7220_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
+
+#define QIB_7220_GPIOOut_OFFS 0xE0
+
+#define QIB_7220_GPIOMask_OFFS 0xE8
+
+#define QIB_7220_GPIOStatus_OFFS 0xF0
+
+#define QIB_7220_GPIOClear_OFFS 0xF8
+
+#define QIB_7220_RcvCtrl_OFFS 0x100
+#define QIB_7220_RcvCtrl_Reserved_LSB 0x27
+#define QIB_7220_RcvCtrl_Reserved_RMASK 0x1FFFFFF
+#define QIB_7220_RcvCtrl_RcvQPMapEnable_LSB 0x26
+#define QIB_7220_RcvCtrl_RcvQPMapEnable_RMASK 0x1
+#define QIB_7220_RcvCtrl_PortCfg_LSB 0x24
+#define QIB_7220_RcvCtrl_PortCfg_RMASK 0x3
+#define QIB_7220_RcvCtrl_TailUpd_LSB 0x23
+#define QIB_7220_RcvCtrl_TailUpd_RMASK 0x1
+#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_LSB 0x22
+#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
+#define QIB_7220_RcvCtrl_IntrAvail_LSB 0x11
+#define QIB_7220_RcvCtrl_IntrAvail_RMASK 0x1FFFF
+#define QIB_7220_RcvCtrl_PortEnable_LSB 0x0
+#define QIB_7220_RcvCtrl_PortEnable_RMASK 0x1FFFF
+
+#define QIB_7220_RcvBTHQP_OFFS 0x108
+#define QIB_7220_RcvBTHQP_Reserved_LSB 0x18
+#define QIB_7220_RcvBTHQP_Reserved_RMASK 0xFF
+#define QIB_7220_RcvBTHQP_RcvBTHQP_LSB 0x0
+#define QIB_7220_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
+
+#define QIB_7220_RcvHdrSize_OFFS 0x110
+
+#define QIB_7220_RcvHdrCnt_OFFS 0x118
+
+#define QIB_7220_RcvHdrEntSize_OFFS 0x120
+
+#define QIB_7220_RcvTIDBase_OFFS 0x128
+
+#define QIB_7220_RcvTIDCnt_OFFS 0x130
+
+#define QIB_7220_RcvEgrBase_OFFS 0x138
+
+#define QIB_7220_RcvEgrCnt_OFFS 0x140
+
+#define QIB_7220_RcvBufBase_OFFS 0x148
+
+#define QIB_7220_RcvBufSize_OFFS 0x150
+
+#define QIB_7220_RxIntMemBase_OFFS 0x158
+
+#define QIB_7220_RxIntMemSize_OFFS 0x160
+
+#define QIB_7220_RcvPartitionKey_OFFS 0x168
+
+#define QIB_7220_RcvQPMulticastPort_OFFS 0x170
+#define QIB_7220_RcvQPMulticastPort_Reserved_LSB 0x5
+#define QIB_7220_RcvQPMulticastPort_Reserved_RMASK 0x7FFFFFFFFFFFFFF
+#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_LSB 0x0
+#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_RMASK 0x1F
+
+#define QIB_7220_RcvPktLEDCnt_OFFS 0x178
+#define QIB_7220_RcvPktLEDCnt_ONperiod_LSB 0x20
+#define QIB_7220_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
+#define QIB_7220_RcvPktLEDCnt_OFFperiod_LSB 0x0
+#define QIB_7220_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
+
+#define QIB_7220_IBCDDRCtrl_OFFS 0x180
+#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_LSB 0x30
+#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_RMASK 0xFFFF
+#define QIB_7220_IBCDDRCtrl_IB_DLID_LSB 0x20
+#define QIB_7220_IBCDDRCtrl_IB_DLID_RMASK 0xFFFF
+#define QIB_7220_IBCDDRCtrl_Reserved_LSB 0x1B
+#define QIB_7220_IBCDDRCtrl_Reserved_RMASK 0x1F
+#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_LSB 0x1A
+#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_LSB 0x12
+#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_RMASK 0xFF
+#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_LSB 0x11
+#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_LSB 0x10
+#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_DDS_LSB 0xC
+#define QIB_7220_IBCDDRCtrl_SD_DDS_RMASK 0xF
+#define QIB_7220_IBCDDRCtrl_SD_DDSV_LSB 0xB
+#define QIB_7220_IBCDDRCtrl_SD_DDSV_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_LSB 0xA
+#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_LSB 0x9
+#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_LSB 0x8
+#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_LSB 0x7
+#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_LSB 0x5
+#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_RMASK 0x3
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_LSB 0x4
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_LSB 0x3
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_LSB 0x2
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_LSB 0x1
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_LSB 0x0
+#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_RMASK 0x1
+
+#define QIB_7220_HRTBT_GUID_OFFS 0x188
+
+#define QIB_7220_IBCDDRCtrl2_OFFS 0x1A0
+#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_LSB 0x5
+#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_RMASK 0x1F
+#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_LSB 0x0
+#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_RMASK 0x1F
+
+#define QIB_7220_IBCDDRStatus_OFFS 0x1A8
+#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_LSB 0x24
+#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_RMASK 0x1
+#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_LSB 0x20
+#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_RMASK 0xF
+#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_LSB 0x1E
+#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_RMASK 0x3
+#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_LSB 0x1A
+#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_RMASK 0xF
+#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_LSB 0x0
+#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_RMASK 0x3FFFFFF
+
+#define QIB_7220_JIntReload_OFFS 0x1B0
+#define QIB_7220_JIntReload_J_limit_reload_LSB 0x10
+#define QIB_7220_JIntReload_J_limit_reload_RMASK 0xFFFF
+#define QIB_7220_JIntReload_J_reload_LSB 0x0
+#define QIB_7220_JIntReload_J_reload_RMASK 0xFFFF
+
+#define QIB_7220_IBNCModeCtrl_OFFS 0x1B8
+#define QIB_7220_IBNCModeCtrl_Reserved_LSB 0x1A
+#define QIB_7220_IBNCModeCtrl_Reserved_RMASK 0x3FFFFFFFFF
+#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_LSB 0x11
+#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_RMASK 0x1FF
+#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_LSB 0x8
+#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_RMASK 0x1FF
+#define QIB_7220_IBNCModeCtrl_Reserved1_LSB 0x3
+#define QIB_7220_IBNCModeCtrl_Reserved1_RMASK 0x1F
+#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_LSB 0x2
+#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
+#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_LSB 0x1
+#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_RMASK 0x1
+#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_LSB 0x0
+#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_RMASK 0x1
+
+#define QIB_7220_SendCtrl_OFFS 0x1C0
+#define QIB_7220_SendCtrl_Disarm_LSB 0x1F
+#define QIB_7220_SendCtrl_Disarm_RMASK 0x1
+#define QIB_7220_SendCtrl_Reserved_LSB 0x1D
+#define QIB_7220_SendCtrl_Reserved_RMASK 0x3
+#define QIB_7220_SendCtrl_AvailUpdThld_LSB 0x18
+#define QIB_7220_SendCtrl_AvailUpdThld_RMASK 0x1F
+#define QIB_7220_SendCtrl_DisarmPIOBuf_LSB 0x10
+#define QIB_7220_SendCtrl_DisarmPIOBuf_RMASK 0xFF
+#define QIB_7220_SendCtrl_Reserved1_LSB 0xD
+#define QIB_7220_SendCtrl_Reserved1_RMASK 0x7
+#define QIB_7220_SendCtrl_SDmaHalt_LSB 0xC
+#define QIB_7220_SendCtrl_SDmaHalt_RMASK 0x1
+#define QIB_7220_SendCtrl_SDmaEnable_LSB 0xB
+#define QIB_7220_SendCtrl_SDmaEnable_RMASK 0x1
+#define QIB_7220_SendCtrl_SDmaSingleDescriptor_LSB 0xA
+#define QIB_7220_SendCtrl_SDmaSingleDescriptor_RMASK 0x1
+#define QIB_7220_SendCtrl_SDmaIntEnable_LSB 0x9
+#define QIB_7220_SendCtrl_SDmaIntEnable_RMASK 0x1
+#define QIB_7220_SendCtrl_Reserved2_LSB 0x5
+#define QIB_7220_SendCtrl_Reserved2_RMASK 0xF
+#define QIB_7220_SendCtrl_SSpecialTriggerEn_LSB 0x4
+#define QIB_7220_SendCtrl_SSpecialTriggerEn_RMASK 0x1
+#define QIB_7220_SendCtrl_SPioEnable_LSB 0x3
+#define QIB_7220_SendCtrl_SPioEnable_RMASK 0x1
+#define QIB_7220_SendCtrl_SendBufAvailUpd_LSB 0x2
+#define QIB_7220_SendCtrl_SendBufAvailUpd_RMASK 0x1
+#define QIB_7220_SendCtrl_SendIntBufAvail_LSB 0x1
+#define QIB_7220_SendCtrl_SendIntBufAvail_RMASK 0x1
+#define QIB_7220_SendCtrl_Abort_LSB 0x0
+#define QIB_7220_SendCtrl_Abort_RMASK 0x1
+
+#define QIB_7220_SendBufBase_OFFS 0x1C8
+#define QIB_7220_SendBufBase_Reserved_LSB 0x35
+#define QIB_7220_SendBufBase_Reserved_RMASK 0x7FF
+#define QIB_7220_SendBufBase_BaseAddr_LargePIO_LSB 0x20
+#define QIB_7220_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
+#define QIB_7220_SendBufBase_Reserved1_LSB 0x15
+#define QIB_7220_SendBufBase_Reserved1_RMASK 0x7FF
+#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
+#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
+
+#define QIB_7220_SendBufSize_OFFS 0x1D0
+#define QIB_7220_SendBufSize_Reserved_LSB 0x2D
+#define QIB_7220_SendBufSize_Reserved_RMASK 0xFFFFF
+#define QIB_7220_SendBufSize_Size_LargePIO_LSB 0x20
+#define QIB_7220_SendBufSize_Size_LargePIO_RMASK 0x1FFF
+#define QIB_7220_SendBufSize_Reserved1_LSB 0xC
+#define QIB_7220_SendBufSize_Reserved1_RMASK 0xFFFFF
+#define QIB_7220_SendBufSize_Size_SmallPIO_LSB 0x0
+#define QIB_7220_SendBufSize_Size_SmallPIO_RMASK 0xFFF
+
+#define QIB_7220_SendBufCnt_OFFS 0x1D8
+#define QIB_7220_SendBufCnt_Reserved_LSB 0x24
+#define QIB_7220_SendBufCnt_Reserved_RMASK 0xFFFFFFF
+#define QIB_7220_SendBufCnt_Num_LargeBuffers_LSB 0x20
+#define QIB_7220_SendBufCnt_Num_LargeBuffers_RMASK 0xF
+#define QIB_7220_SendBufCnt_Reserved1_LSB 0x9
+#define QIB_7220_SendBufCnt_Reserved1_RMASK 0x7FFFFF
+#define QIB_7220_SendBufCnt_Num_SmallBuffers_LSB 0x0
+#define QIB_7220_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF
+
+#define QIB_7220_SendBufAvailAddr_OFFS 0x1E0
+#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
+#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF
+#define QIB_7220_SendBufAvailAddr_Reserved_LSB 0x0
+#define QIB_7220_SendBufAvailAddr_Reserved_RMASK 0x3F
+
+#define QIB_7220_TxIntMemBase_OFFS 0x1E8
+
+#define QIB_7220_TxIntMemSize_OFFS 0x1F0
+
+#define QIB_7220_SendDmaBase_OFFS 0x1F8
+#define QIB_7220_SendDmaBase_Reserved_LSB 0x30
+#define QIB_7220_SendDmaBase_Reserved_RMASK 0xFFFF
+#define QIB_7220_SendDmaBase_SendDmaBase_LSB 0x0
+#define QIB_7220_SendDmaBase_SendDmaBase_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7220_SendDmaLenGen_OFFS 0x200
+#define QIB_7220_SendDmaLenGen_Reserved_LSB 0x13
+#define QIB_7220_SendDmaLenGen_Reserved_RMASK 0x1FFFFFFFFFFF
+#define QIB_7220_SendDmaLenGen_Generation_LSB 0x10
+#define QIB_7220_SendDmaLenGen_Generation_MSB 0x12
+#define QIB_7220_SendDmaLenGen_Generation_RMASK 0x7
+#define QIB_7220_SendDmaLenGen_Length_LSB 0x0
+#define QIB_7220_SendDmaLenGen_Length_RMASK 0xFFFF
+
+#define QIB_7220_SendDmaTail_OFFS 0x208
+#define QIB_7220_SendDmaTail_Reserved_LSB 0x10
+#define QIB_7220_SendDmaTail_Reserved_RMASK 0xFFFFFFFFFFFF
+#define QIB_7220_SendDmaTail_SendDmaTail_LSB 0x0
+#define QIB_7220_SendDmaTail_SendDmaTail_RMASK 0xFFFF
+
+#define QIB_7220_SendDmaHead_OFFS 0x210
+#define QIB_7220_SendDmaHead_Reserved_LSB 0x30
+#define QIB_7220_SendDmaHead_Reserved_RMASK 0xFFFF
+#define QIB_7220_SendDmaHead_InternalSendDmaHead_LSB 0x20
+#define QIB_7220_SendDmaHead_InternalSendDmaHead_RMASK 0xFFFF
+#define QIB_7220_SendDmaHead_Reserved1_LSB 0x10
+#define QIB_7220_SendDmaHead_Reserved1_RMASK 0xFFFF
+#define QIB_7220_SendDmaHead_SendDmaHead_LSB 0x0
+#define QIB_7220_SendDmaHead_SendDmaHead_RMASK 0xFFFF
+
+#define QIB_7220_SendDmaHeadAddr_OFFS 0x218
+#define QIB_7220_SendDmaHeadAddr_Reserved_LSB 0x30
+#define QIB_7220_SendDmaHeadAddr_Reserved_RMASK 0xFFFF
+#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_LSB 0x0
+#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7220_SendDmaBufMask0_OFFS 0x220
+#define QIB_7220_SendDmaBufMask0_BufMask_63_0_LSB 0x0
+#define QIB_7220_SendDmaBufMask0_BufMask_63_0_RMASK 0x0
+
+#define QIB_7220_SendDmaStatus_OFFS 0x238
+#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_LSB 0x3F
+#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_RMASK 0x1
+#define QIB_7220_SendDmaStatus_AbortInProg_LSB 0x3E
+#define QIB_7220_SendDmaStatus_AbortInProg_RMASK 0x1
+#define QIB_7220_SendDmaStatus_InternalSDmaEnable_LSB 0x3D
+#define QIB_7220_SendDmaStatus_InternalSDmaEnable_RMASK 0x1
+#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_LSB 0x2F
+#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_RMASK 0x3FFF
+#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_LSB 0x28
+#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_RMASK 0x7F
+#define QIB_7220_SendDmaStatus_RpyTag_7_0_LSB 0x20
+#define QIB_7220_SendDmaStatus_RpyTag_7_0_RMASK 0xFF
+#define QIB_7220_SendDmaStatus_ScbFull_LSB 0x1F
+#define QIB_7220_SendDmaStatus_ScbFull_RMASK 0x1
+#define QIB_7220_SendDmaStatus_ScbEmpty_LSB 0x1E
+#define QIB_7220_SendDmaStatus_ScbEmpty_RMASK 0x1
+#define QIB_7220_SendDmaStatus_ScbEntryValid_LSB 0x1D
+#define QIB_7220_SendDmaStatus_ScbEntryValid_RMASK 0x1
+#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_LSB 0x1C
+#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_LSB 0x1B
+#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoDisarmed_LSB 0x1A
+#define QIB_7220_SendDmaStatus_SplFifoDisarmed_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoEmpty_LSB 0x19
+#define QIB_7220_SendDmaStatus_SplFifoEmpty_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoFull_LSB 0x18
+#define QIB_7220_SendDmaStatus_SplFifoFull_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoBufNum_LSB 0x10
+#define QIB_7220_SendDmaStatus_SplFifoBufNum_RMASK 0xFF
+#define QIB_7220_SendDmaStatus_SplFifoDescIndex_LSB 0x0
+#define QIB_7220_SendDmaStatus_SplFifoDescIndex_RMASK 0xFFFF
+
+#define QIB_7220_SendBufErr0_OFFS 0x240
+#define QIB_7220_SendBufErr0_SendBufErr_63_0_LSB 0x0
+#define QIB_7220_SendBufErr0_SendBufErr_63_0_RMASK 0x0
+
+#define QIB_7220_RcvHdrAddr0_OFFS 0x270
+#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
+#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
+#define QIB_7220_RcvHdrAddr0_Reserved_LSB 0x0
+#define QIB_7220_RcvHdrAddr0_Reserved_RMASK 0x3
+
+#define QIB_7220_RcvHdrTailAddr0_OFFS 0x300
+#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
+#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
+#define QIB_7220_RcvHdrTailAddr0_Reserved_LSB 0x0
+#define QIB_7220_RcvHdrTailAddr0_Reserved_RMASK 0x3
+
+#define QIB_7220_ibsd_epb_access_ctrl_OFFS 0x3C0
+#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_LSB 0x8
+#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_RMASK 0x1
+#define QIB_7220_ibsd_epb_access_ctrl_Reserved_LSB 0x1
+#define QIB_7220_ibsd_epb_access_ctrl_Reserved_RMASK 0x7F
+#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_LSB 0x0
+#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_RMASK 0x1
+
+#define QIB_7220_ibsd_epb_transaction_reg_OFFS 0x3C8
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_LSB 0x1F
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_LSB 0x1E
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved_LSB 0x1D
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_LSB 0x1C
+#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_LSB 0x1B
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_LSB 0x19
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_RMASK 0x3
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_LSB 0x18
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_LSB 0x17
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_LSB 0x8
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_RMASK 0x7FFF
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_LSB 0x0
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_RMASK 0xFF
+
+#define QIB_7220_XGXSCfg_OFFS 0x3D8
+#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_LSB 0x3F
+#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_RMASK 0x1
+#define QIB_7220_XGXSCfg_Reserved_LSB 0x13
+#define QIB_7220_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFFF
+#define QIB_7220_XGXSCfg_link_sync_mask_LSB 0x9
+#define QIB_7220_XGXSCfg_link_sync_mask_RMASK 0x3FF
+#define QIB_7220_XGXSCfg_Reserved1_LSB 0x3
+#define QIB_7220_XGXSCfg_Reserved1_RMASK 0x3F
+#define QIB_7220_XGXSCfg_xcv_reset_LSB 0x2
+#define QIB_7220_XGXSCfg_xcv_reset_RMASK 0x1
+#define QIB_7220_XGXSCfg_Reserved2_LSB 0x1
+#define QIB_7220_XGXSCfg_Reserved2_RMASK 0x1
+#define QIB_7220_XGXSCfg_tx_rx_reset_LSB 0x0
+#define QIB_7220_XGXSCfg_tx_rx_reset_RMASK 0x1
+
+#define QIB_7220_IBSerDesCtrl_OFFS 0x3E0
+#define QIB_7220_IBSerDesCtrl_Reserved_LSB 0x2D
+#define QIB_7220_IBSerDesCtrl_Reserved_RMASK 0x7FFFF
+#define QIB_7220_IBSerDesCtrl_INT_uC_LSB 0x2C
+#define QIB_7220_IBSerDesCtrl_INT_uC_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_CKSEL_uC_LSB 0x2A
+#define QIB_7220_IBSerDesCtrl_CKSEL_uC_RMASK 0x3
+#define QIB_7220_IBSerDesCtrl_PLLN_LSB 0x28
+#define QIB_7220_IBSerDesCtrl_PLLN_RMASK 0x3
+#define QIB_7220_IBSerDesCtrl_PLLM_LSB 0x25
+#define QIB_7220_IBSerDesCtrl_PLLM_RMASK 0x7
+#define QIB_7220_IBSerDesCtrl_TXOBPD_LSB 0x24
+#define QIB_7220_IBSerDesCtrl_TXOBPD_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_TWC_LSB 0x23
+#define QIB_7220_IBSerDesCtrl_TWC_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_RXIDLE_LSB 0x22
+#define QIB_7220_IBSerDesCtrl_RXIDLE_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_RXINV_LSB 0x21
+#define QIB_7220_IBSerDesCtrl_RXINV_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_TXINV_LSB 0x20
+#define QIB_7220_IBSerDesCtrl_TXINV_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_Reserved1_LSB 0x12
+#define QIB_7220_IBSerDesCtrl_Reserved1_RMASK 0x3FFF
+#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_LSB 0xD
+#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_RMASK 0x1F
+#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_LSB 0x8
+#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_RMASK 0x1F
+#define QIB_7220_IBSerDesCtrl_Reserved2_LSB 0x1
+#define QIB_7220_IBSerDesCtrl_Reserved2_RMASK 0x7F
+#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_LSB 0x0
+#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_RMASK 0x1
+
+#define QIB_7220_pciesd_epb_access_ctrl_OFFS 0x400
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_LSB 0x8
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_RMASK 0x1
+#define QIB_7220_pciesd_epb_access_ctrl_Reserved_LSB 0x3
+#define QIB_7220_pciesd_epb_access_ctrl_Reserved_RMASK 0x1F
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_LSB 0x1
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_RMASK 0x3
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_LSB 0x0
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_RMASK 0x1
+
+#define QIB_7220_pciesd_epb_transaction_reg_OFFS 0x408
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_LSB 0x1F
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_LSB 0x1E
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_Reserved_LSB 0x1D
+#define QIB_7220_pciesd_epb_transaction_reg_Reserved_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_LSB 0x1C
+#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_LSB 0x19
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_RMASK 0x7
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_LSB 0x18
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_LSB 0x17
+#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_LSB 0x8
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_RMASK 0x7FFF
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_LSB 0x0
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_RMASK 0xFF
+
+#define QIB_7220_SerDes_DDSRXEQ0_OFFS 0x500
+#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_LSB 0x4
+#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_RMASK 0x3F
+#define QIB_7220_SerDes_DDSRXEQ0_element_num_LSB 0x0
+#define QIB_7220_SerDes_DDSRXEQ0_element_num_RMASK 0xF
+
+#define QIB_7220_LBIntCnt_OFFS 0x13000
+
+#define QIB_7220_LBFlowStallCnt_OFFS 0x13008
+
+#define QIB_7220_TxSDmaDescCnt_OFFS 0x13010
+
+#define QIB_7220_TxUnsupVLErrCnt_OFFS 0x13018
+
+#define QIB_7220_TxDataPktCnt_OFFS 0x13020
+
+#define QIB_7220_TxFlowPktCnt_OFFS 0x13028
+
+#define QIB_7220_TxDwordCnt_OFFS 0x13030
+
+#define QIB_7220_TxLenErrCnt_OFFS 0x13038
+
+#define QIB_7220_TxMaxMinLenErrCnt_OFFS 0x13040
+
+#define QIB_7220_TxUnderrunCnt_OFFS 0x13048
+
+#define QIB_7220_TxFlowStallCnt_OFFS 0x13050
+
+#define QIB_7220_TxDroppedPktCnt_OFFS 0x13058
+
+#define QIB_7220_RxDroppedPktCnt_OFFS 0x13060
+
+#define QIB_7220_RxDataPktCnt_OFFS 0x13068
+
+#define QIB_7220_RxFlowPktCnt_OFFS 0x13070
+
+#define QIB_7220_RxDwordCnt_OFFS 0x13078
+
+#define QIB_7220_RxLenErrCnt_OFFS 0x13080
+
+#define QIB_7220_RxMaxMinLenErrCnt_OFFS 0x13088
+
+#define QIB_7220_RxICRCErrCnt_OFFS 0x13090
+
+#define QIB_7220_RxVCRCErrCnt_OFFS 0x13098
+
+#define QIB_7220_RxFlowCtrlViolCnt_OFFS 0x130A0
+
+#define QIB_7220_RxVersionErrCnt_OFFS 0x130A8
+
+#define QIB_7220_RxLinkMalformCnt_OFFS 0x130B0
+
+#define QIB_7220_RxEBPCnt_OFFS 0x130B8
+
+#define QIB_7220_RxLPCRCErrCnt_OFFS 0x130C0
+
+#define QIB_7220_RxBufOvflCnt_OFFS 0x130C8
+
+#define QIB_7220_RxTIDFullErrCnt_OFFS 0x130D0
+
+#define QIB_7220_RxTIDValidErrCnt_OFFS 0x130D8
+
+#define QIB_7220_RxPKeyMismatchCnt_OFFS 0x130E0
+
+#define QIB_7220_RxP0HdrEgrOvflCnt_OFFS 0x130E8
+
+#define QIB_7220_IBStatusChangeCnt_OFFS 0x13170
+
+#define QIB_7220_IBLinkErrRecoveryCnt_OFFS 0x13178
+
+#define QIB_7220_IBLinkDownedCnt_OFFS 0x13180
+
+#define QIB_7220_IBSymbolErrCnt_OFFS 0x13188
+
+#define QIB_7220_RxVL15DroppedPktCnt_OFFS 0x13190
+
+#define QIB_7220_RxOtherLocalPhyErrCnt_OFFS 0x13198
+
+#define QIB_7220_PcieRetryBufDiagQwordCnt_OFFS 0x131A0
+
+#define QIB_7220_ExcessBufferOvflCnt_OFFS 0x131A8
+
+#define QIB_7220_LocalLinkIntegrityErrCnt_OFFS 0x131B0
+
+#define QIB_7220_RxVlErrCnt_OFFS 0x131B8
+
+#define QIB_7220_RxDlidFltrCnt_OFFS 0x131C0
+
+#define QIB_7220_CNT_0131C8_OFFS 0x131C8
+
+#define QIB_7220_PSStat_OFFS 0x13200
+
+#define QIB_7220_PSStart_OFFS 0x13208
+
+#define QIB_7220_PSInterval_OFFS 0x13210
+
+#define QIB_7220_PSRcvDataCount_OFFS 0x13218
+
+#define QIB_7220_PSRcvPktsCount_OFFS 0x13220
+
+#define QIB_7220_PSXmitDataCount_OFFS 0x13228
+
+#define QIB_7220_PSXmitPktsCount_OFFS 0x13230
+
+#define QIB_7220_PSXmitWaitCount_OFFS 0x13238
+
+#define QIB_7220_CNT_013240_OFFS 0x13240
+
+#define QIB_7220_RcvEgrArray_OFFS 0x14000
+
+#define QIB_7220_MEM_038000_OFFS 0x38000
+
+#define QIB_7220_RcvTIDArray0_OFFS 0x53000
+
+#define QIB_7220_PIOLaunchFIFO_OFFS 0x64000
+
+#define QIB_7220_MEM_064480_OFFS 0x64480
+
+#define QIB_7220_SendPIOpbcCache_OFFS 0x64800
+
+#define QIB_7220_MEM_064C80_OFFS 0x64C80
+
+#define QIB_7220_PreLaunchFIFO_OFFS 0x65000
+
+#define QIB_7220_MEM_065080_OFFS 0x65080
+
+#define QIB_7220_ScoreBoard_OFFS 0x65400
+
+#define QIB_7220_MEM_065440_OFFS 0x65440
+
+#define QIB_7220_DescriptorFIFO_OFFS 0x65800
+
+#define QIB_7220_MEM_065880_OFFS 0x65880
+
+#define QIB_7220_RcvBuf1_OFFS 0x72000
+
+#define QIB_7220_MEM_074800_OFFS 0x74800
+
+#define QIB_7220_RcvBuf2_OFFS 0x75000
+
+#define QIB_7220_MEM_076400_OFFS 0x76400
+
+#define QIB_7220_RcvFlags_OFFS 0x77000
+
+#define QIB_7220_MEM_078400_OFFS 0x78400
+
+#define QIB_7220_RcvLookupBuf1_OFFS 0x79000
+
+#define QIB_7220_MEM_07A400_OFFS 0x7A400
+
+#define QIB_7220_RcvDMADatBuf_OFFS 0x7B000
+
+#define QIB_7220_RcvDMAHdrBuf_OFFS 0x7B800
+
+#define QIB_7220_MiscRXEIntMem_OFFS 0x7C000
+
+#define QIB_7220_MEM_07D400_OFFS 0x7D400
+
+#define QIB_7220_PCIERcvBuf_OFFS 0x80000
+
+#define QIB_7220_PCIERetryBuf_OFFS 0x84000
+
+#define QIB_7220_PCIERcvBufRdToWrAddr_OFFS 0x88000
+
+#define QIB_7220_PCIECplBuf_OFFS 0x90000
+
+#define QIB_7220_IBSerDesMappTable_OFFS 0x94000
+
+#define QIB_7220_MEM_095000_OFFS 0x95000
+
+#define QIB_7220_SendBuf0_MA_OFFS 0x100000
+
+#define QIB_7220_MEM_1A0000_OFFS 0x1A0000
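
Editorial note (not part of the patch): both generated headers follow the same field-encoding convention -- <Reg>_OFFS is the byte offset of a 64-bit register, <Reg>_<Field>_LSB is the bit position of the field's least-significant bit, and <Reg>_<Field>_RMASK is the mask to apply after shifting the register value right by LSB. The sketch below is only an illustration of that convention using the IBCStatus LinkState field defined above; qib_read_kreg64() is a hypothetical stand-in for whatever MMIO accessor the driver actually uses, not a function defined by this patch.

    #include <linux/io.h>
    #include <linux/types.h>

    /* Hypothetical 64-bit MMIO read helper for illustration only. */
    static inline u64 qib_read_kreg64(const volatile void __iomem *base, u32 offs)
    {
            return readq(base + offs);
    }

    /* Extract a field: shift down to its LSB, then mask to its width. */
    static inline u32 qib_7220_link_state(const volatile void __iomem *base)
    {
            u64 ibcstatus = qib_read_kreg64(base, QIB_7220_IBCStatus_OFFS);

            return (ibcstatus >> QIB_7220_IBCStatus_LinkState_LSB) &
                   QIB_7220_IBCStatus_LinkState_RMASK;
    }

The 7322 header that follows additionally emits a <Reg>_<Field>_MSB macro for each field, so a field's width can be derived as MSB - LSB + 1 if needed; the shift-and-mask pattern above applies unchanged.
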
diff --git a/drivers/infiniband/hw/qib/qib_7322_regs.h b/drivers/infiniband/hw/qib/qib_7322_regs.h
new file mode 100644
index 000000000000..a97440ba924c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7322_regs.h
@@ -0,0 +1,3163 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
+
+#define QIB_7322_Revision_OFFS 0x0
+#define QIB_7322_Revision_DEF 0x0000000002010601
+#define QIB_7322_Revision_R_Simulator_LSB 0x3F
+#define QIB_7322_Revision_R_Simulator_MSB 0x3F
+#define QIB_7322_Revision_R_Simulator_RMASK 0x1
+#define QIB_7322_Revision_R_Emulation_LSB 0x3E
+#define QIB_7322_Revision_R_Emulation_MSB 0x3E
+#define QIB_7322_Revision_R_Emulation_RMASK 0x1
+#define QIB_7322_Revision_R_Emulation_Revcode_LSB 0x28
+#define QIB_7322_Revision_R_Emulation_Revcode_MSB 0x3D
+#define QIB_7322_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
+#define QIB_7322_Revision_BoardID_LSB 0x20
+#define QIB_7322_Revision_BoardID_MSB 0x27
+#define QIB_7322_Revision_BoardID_RMASK 0xFF
+#define QIB_7322_Revision_R_SW_LSB 0x18
+#define QIB_7322_Revision_R_SW_MSB 0x1F
+#define QIB_7322_Revision_R_SW_RMASK 0xFF
+#define QIB_7322_Revision_R_Arch_LSB 0x10
+#define QIB_7322_Revision_R_Arch_MSB 0x17
+#define QIB_7322_Revision_R_Arch_RMASK 0xFF
+#define QIB_7322_Revision_R_ChipRevMajor_LSB 0x8
+#define QIB_7322_Revision_R_ChipRevMajor_MSB 0xF
+#define QIB_7322_Revision_R_ChipRevMajor_RMASK 0xFF
+#define QIB_7322_Revision_R_ChipRevMinor_LSB 0x0
+#define QIB_7322_Revision_R_ChipRevMinor_MSB 0x7
+#define QIB_7322_Revision_R_ChipRevMinor_RMASK 0xFF
+
+#define QIB_7322_Control_OFFS 0x8
+#define QIB_7322_Control_DEF 0x0000000000000000
+#define QIB_7322_Control_PCIECplQDiagEn_LSB 0x6
+#define QIB_7322_Control_PCIECplQDiagEn_MSB 0x6
+#define QIB_7322_Control_PCIECplQDiagEn_RMASK 0x1
+#define QIB_7322_Control_PCIEPostQDiagEn_LSB 0x5
+#define QIB_7322_Control_PCIEPostQDiagEn_MSB 0x5
+#define QIB_7322_Control_PCIEPostQDiagEn_RMASK 0x1
+#define QIB_7322_Control_SDmaDescFetchPriorityEn_LSB 0x4
+#define QIB_7322_Control_SDmaDescFetchPriorityEn_MSB 0x4
+#define QIB_7322_Control_SDmaDescFetchPriorityEn_RMASK 0x1
+#define QIB_7322_Control_PCIERetryBufDiagEn_LSB 0x3
+#define QIB_7322_Control_PCIERetryBufDiagEn_MSB 0x3
+#define QIB_7322_Control_PCIERetryBufDiagEn_RMASK 0x1
+#define QIB_7322_Control_FreezeMode_LSB 0x1
+#define QIB_7322_Control_FreezeMode_MSB 0x1
+#define QIB_7322_Control_FreezeMode_RMASK 0x1
+#define QIB_7322_Control_SyncReset_LSB 0x0
+#define QIB_7322_Control_SyncReset_MSB 0x0
+#define QIB_7322_Control_SyncReset_RMASK 0x1
+
+#define QIB_7322_PageAlign_OFFS 0x10
+#define QIB_7322_PageAlign_DEF 0x0000000000001000
+
+#define QIB_7322_ContextCnt_OFFS 0x18
+#define QIB_7322_ContextCnt_DEF 0x0000000000000012
+
+#define QIB_7322_Scratch_OFFS 0x20
+#define QIB_7322_Scratch_DEF 0x0000000000000000
+
+#define QIB_7322_CntrRegBase_OFFS 0x28
+#define QIB_7322_CntrRegBase_DEF 0x0000000000011000
+
+#define QIB_7322_SendRegBase_OFFS 0x30
+#define QIB_7322_SendRegBase_DEF 0x0000000000003000
+
+#define QIB_7322_UserRegBase_OFFS 0x38
+#define QIB_7322_UserRegBase_DEF 0x0000000000200000
+
+#define QIB_7322_IntMask_OFFS 0x68
+#define QIB_7322_IntMask_DEF 0x0000000000000000
+#define QIB_7322_IntMask_SDmaIntMask_1_LSB 0x3F
+#define QIB_7322_IntMask_SDmaIntMask_1_MSB 0x3F
+#define QIB_7322_IntMask_SDmaIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SDmaIntMask_0_LSB 0x3E
+#define QIB_7322_IntMask_SDmaIntMask_0_MSB 0x3E
+#define QIB_7322_IntMask_SDmaIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_SDmaProgressIntMask_1_LSB 0x3D
+#define QIB_7322_IntMask_SDmaProgressIntMask_1_MSB 0x3D
+#define QIB_7322_IntMask_SDmaProgressIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SDmaProgressIntMask_0_LSB 0x3C
+#define QIB_7322_IntMask_SDmaProgressIntMask_0_MSB 0x3C
+#define QIB_7322_IntMask_SDmaProgressIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_SDmaIdleIntMask_1_LSB 0x3B
+#define QIB_7322_IntMask_SDmaIdleIntMask_1_MSB 0x3B
+#define QIB_7322_IntMask_SDmaIdleIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SDmaIdleIntMask_0_LSB 0x3A
+#define QIB_7322_IntMask_SDmaIdleIntMask_0_MSB 0x3A
+#define QIB_7322_IntMask_SDmaIdleIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_LSB 0x39
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_MSB 0x39
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_LSB 0x38
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_MSB 0x38
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg17IntMask_LSB 0x31
+#define QIB_7322_IntMask_RcvUrg17IntMask_MSB 0x31
+#define QIB_7322_IntMask_RcvUrg17IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg16IntMask_LSB 0x30
+#define QIB_7322_IntMask_RcvUrg16IntMask_MSB 0x30
+#define QIB_7322_IntMask_RcvUrg16IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg15IntMask_LSB 0x2F
+#define QIB_7322_IntMask_RcvUrg15IntMask_MSB 0x2F
+#define QIB_7322_IntMask_RcvUrg15IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg14IntMask_LSB 0x2E
+#define QIB_7322_IntMask_RcvUrg14IntMask_MSB 0x2E
+#define QIB_7322_IntMask_RcvUrg14IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg13IntMask_LSB 0x2D
+#define QIB_7322_IntMask_RcvUrg13IntMask_MSB 0x2D
+#define QIB_7322_IntMask_RcvUrg13IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg12IntMask_LSB 0x2C
+#define QIB_7322_IntMask_RcvUrg12IntMask_MSB 0x2C
+#define QIB_7322_IntMask_RcvUrg12IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg11IntMask_LSB 0x2B
+#define QIB_7322_IntMask_RcvUrg11IntMask_MSB 0x2B
+#define QIB_7322_IntMask_RcvUrg11IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg10IntMask_LSB 0x2A
+#define QIB_7322_IntMask_RcvUrg10IntMask_MSB 0x2A
+#define QIB_7322_IntMask_RcvUrg10IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg9IntMask_LSB 0x29
+#define QIB_7322_IntMask_RcvUrg9IntMask_MSB 0x29
+#define QIB_7322_IntMask_RcvUrg9IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg8IntMask_LSB 0x28
+#define QIB_7322_IntMask_RcvUrg8IntMask_MSB 0x28
+#define QIB_7322_IntMask_RcvUrg8IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg7IntMask_LSB 0x27
+#define QIB_7322_IntMask_RcvUrg7IntMask_MSB 0x27
+#define QIB_7322_IntMask_RcvUrg7IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg6IntMask_LSB 0x26
+#define QIB_7322_IntMask_RcvUrg6IntMask_MSB 0x26
+#define QIB_7322_IntMask_RcvUrg6IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg5IntMask_LSB 0x25
+#define QIB_7322_IntMask_RcvUrg5IntMask_MSB 0x25
+#define QIB_7322_IntMask_RcvUrg5IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg4IntMask_LSB 0x24
+#define QIB_7322_IntMask_RcvUrg4IntMask_MSB 0x24
+#define QIB_7322_IntMask_RcvUrg4IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg3IntMask_LSB 0x23
+#define QIB_7322_IntMask_RcvUrg3IntMask_MSB 0x23
+#define QIB_7322_IntMask_RcvUrg3IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg2IntMask_LSB 0x22
+#define QIB_7322_IntMask_RcvUrg2IntMask_MSB 0x22
+#define QIB_7322_IntMask_RcvUrg2IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg1IntMask_LSB 0x21
+#define QIB_7322_IntMask_RcvUrg1IntMask_MSB 0x21
+#define QIB_7322_IntMask_RcvUrg1IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg0IntMask_LSB 0x20
+#define QIB_7322_IntMask_RcvUrg0IntMask_MSB 0x20
+#define QIB_7322_IntMask_RcvUrg0IntMask_RMASK 0x1
+#define QIB_7322_IntMask_ErrIntMask_1_LSB 0x1F
+#define QIB_7322_IntMask_ErrIntMask_1_MSB 0x1F
+#define QIB_7322_IntMask_ErrIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_ErrIntMask_0_LSB 0x1E
+#define QIB_7322_IntMask_ErrIntMask_0_MSB 0x1E
+#define QIB_7322_IntMask_ErrIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_ErrIntMask_LSB 0x1D
+#define QIB_7322_IntMask_ErrIntMask_MSB 0x1D
+#define QIB_7322_IntMask_ErrIntMask_RMASK 0x1
+#define QIB_7322_IntMask_AssertGPIOIntMask_LSB 0x1C
+#define QIB_7322_IntMask_AssertGPIOIntMask_MSB 0x1C
+#define QIB_7322_IntMask_AssertGPIOIntMask_RMASK 0x1
+#define QIB_7322_IntMask_SendDoneIntMask_1_LSB 0x19
+#define QIB_7322_IntMask_SendDoneIntMask_1_MSB 0x19
+#define QIB_7322_IntMask_SendDoneIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SendDoneIntMask_0_LSB 0x18
+#define QIB_7322_IntMask_SendDoneIntMask_0_MSB 0x18
+#define QIB_7322_IntMask_SendDoneIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_SendBufAvailIntMask_LSB 0x17
+#define QIB_7322_IntMask_SendBufAvailIntMask_MSB 0x17
+#define QIB_7322_IntMask_SendBufAvailIntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail17IntMask_LSB 0x11
+#define QIB_7322_IntMask_RcvAvail17IntMask_MSB 0x11
+#define QIB_7322_IntMask_RcvAvail17IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail16IntMask_LSB 0x10
+#define QIB_7322_IntMask_RcvAvail16IntMask_MSB 0x10
+#define QIB_7322_IntMask_RcvAvail16IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail15IntMask_LSB 0xF
+#define QIB_7322_IntMask_RcvAvail15IntMask_MSB 0xF
+#define QIB_7322_IntMask_RcvAvail15IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail14IntMask_LSB 0xE
+#define QIB_7322_IntMask_RcvAvail14IntMask_MSB 0xE
+#define QIB_7322_IntMask_RcvAvail14IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail13IntMask_LSB 0xD
+#define QIB_7322_IntMask_RcvAvail13IntMask_MSB 0xD
+#define QIB_7322_IntMask_RcvAvail13IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail12IntMask_LSB 0xC
+#define QIB_7322_IntMask_RcvAvail12IntMask_MSB 0xC
+#define QIB_7322_IntMask_RcvAvail12IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail11IntMask_LSB 0xB
+#define QIB_7322_IntMask_RcvAvail11IntMask_MSB 0xB
+#define QIB_7322_IntMask_RcvAvail11IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail10IntMask_LSB 0xA
+#define QIB_7322_IntMask_RcvAvail10IntMask_MSB 0xA
+#define QIB_7322_IntMask_RcvAvail10IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail9IntMask_LSB 0x9
+#define QIB_7322_IntMask_RcvAvail9IntMask_MSB 0x9
+#define QIB_7322_IntMask_RcvAvail9IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail8IntMask_LSB 0x8
+#define QIB_7322_IntMask_RcvAvail8IntMask_MSB 0x8
+#define QIB_7322_IntMask_RcvAvail8IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail7IntMask_LSB 0x7
+#define QIB_7322_IntMask_RcvAvail7IntMask_MSB 0x7
+#define QIB_7322_IntMask_RcvAvail7IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail6IntMask_LSB 0x6
+#define QIB_7322_IntMask_RcvAvail6IntMask_MSB 0x6
+#define QIB_7322_IntMask_RcvAvail6IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail5IntMask_LSB 0x5
+#define QIB_7322_IntMask_RcvAvail5IntMask_MSB 0x5
+#define QIB_7322_IntMask_RcvAvail5IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail4IntMask_LSB 0x4
+#define QIB_7322_IntMask_RcvAvail4IntMask_MSB 0x4
+#define QIB_7322_IntMask_RcvAvail4IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail3IntMask_LSB 0x3
+#define QIB_7322_IntMask_RcvAvail3IntMask_MSB 0x3
+#define QIB_7322_IntMask_RcvAvail3IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail2IntMask_LSB 0x2
+#define QIB_7322_IntMask_RcvAvail2IntMask_MSB 0x2
+#define QIB_7322_IntMask_RcvAvail2IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail1IntMask_LSB 0x1
+#define QIB_7322_IntMask_RcvAvail1IntMask_MSB 0x1
+#define QIB_7322_IntMask_RcvAvail1IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail0IntMask_LSB 0x0
+#define QIB_7322_IntMask_RcvAvail0IntMask_MSB 0x0
+#define QIB_7322_IntMask_RcvAvail0IntMask_RMASK 0x1
+
+#define QIB_7322_IntStatus_OFFS 0x70
+#define QIB_7322_IntStatus_DEF 0x0000000000000000
+#define QIB_7322_IntStatus_SDmaInt_1_LSB 0x3F
+#define QIB_7322_IntStatus_SDmaInt_1_MSB 0x3F
+#define QIB_7322_IntStatus_SDmaInt_1_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaInt_0_LSB 0x3E
+#define QIB_7322_IntStatus_SDmaInt_0_MSB 0x3E
+#define QIB_7322_IntStatus_SDmaInt_0_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaProgressInt_1_LSB 0x3D
+#define QIB_7322_IntStatus_SDmaProgressInt_1_MSB 0x3D
+#define QIB_7322_IntStatus_SDmaProgressInt_1_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaProgressInt_0_LSB 0x3C
+#define QIB_7322_IntStatus_SDmaProgressInt_0_MSB 0x3C
+#define QIB_7322_IntStatus_SDmaProgressInt_0_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaIdleInt_1_LSB 0x3B
+#define QIB_7322_IntStatus_SDmaIdleInt_1_MSB 0x3B
+#define QIB_7322_IntStatus_SDmaIdleInt_1_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaIdleInt_0_LSB 0x3A
+#define QIB_7322_IntStatus_SDmaIdleInt_0_MSB 0x3A
+#define QIB_7322_IntStatus_SDmaIdleInt_0_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaCleanupDone_1_LSB 0x39
+#define QIB_7322_IntStatus_SDmaCleanupDone_1_MSB 0x39
+#define QIB_7322_IntStatus_SDmaCleanupDone_1_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaCleanupDone_0_LSB 0x38
+#define QIB_7322_IntStatus_SDmaCleanupDone_0_MSB 0x38
+#define QIB_7322_IntStatus_SDmaCleanupDone_0_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg17_LSB 0x31
+#define QIB_7322_IntStatus_RcvUrg17_MSB 0x31
+#define QIB_7322_IntStatus_RcvUrg17_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg16_LSB 0x30
+#define QIB_7322_IntStatus_RcvUrg16_MSB 0x30
+#define QIB_7322_IntStatus_RcvUrg16_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg15_LSB 0x2F
+#define QIB_7322_IntStatus_RcvUrg15_MSB 0x2F
+#define QIB_7322_IntStatus_RcvUrg15_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg14_LSB 0x2E
+#define QIB_7322_IntStatus_RcvUrg14_MSB 0x2E
+#define QIB_7322_IntStatus_RcvUrg14_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg13_LSB 0x2D
+#define QIB_7322_IntStatus_RcvUrg13_MSB 0x2D
+#define QIB_7322_IntStatus_RcvUrg13_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg12_LSB 0x2C
+#define QIB_7322_IntStatus_RcvUrg12_MSB 0x2C
+#define QIB_7322_IntStatus_RcvUrg12_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg11_LSB 0x2B
+#define QIB_7322_IntStatus_RcvUrg11_MSB 0x2B
+#define QIB_7322_IntStatus_RcvUrg11_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg10_LSB 0x2A
+#define QIB_7322_IntStatus_RcvUrg10_MSB 0x2A
+#define QIB_7322_IntStatus_RcvUrg10_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg9_LSB 0x29
+#define QIB_7322_IntStatus_RcvUrg9_MSB 0x29
+#define QIB_7322_IntStatus_RcvUrg9_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg8_LSB 0x28
+#define QIB_7322_IntStatus_RcvUrg8_MSB 0x28
+#define QIB_7322_IntStatus_RcvUrg8_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg7_LSB 0x27
+#define QIB_7322_IntStatus_RcvUrg7_MSB 0x27
+#define QIB_7322_IntStatus_RcvUrg7_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg6_LSB 0x26
+#define QIB_7322_IntStatus_RcvUrg6_MSB 0x26
+#define QIB_7322_IntStatus_RcvUrg6_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg5_LSB 0x25
+#define QIB_7322_IntStatus_RcvUrg5_MSB 0x25
+#define QIB_7322_IntStatus_RcvUrg5_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg4_LSB 0x24
+#define QIB_7322_IntStatus_RcvUrg4_MSB 0x24
+#define QIB_7322_IntStatus_RcvUrg4_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg3_LSB 0x23
+#define QIB_7322_IntStatus_RcvUrg3_MSB 0x23
+#define QIB_7322_IntStatus_RcvUrg3_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg2_LSB 0x22
+#define QIB_7322_IntStatus_RcvUrg2_MSB 0x22
+#define QIB_7322_IntStatus_RcvUrg2_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg1_LSB 0x21
+#define QIB_7322_IntStatus_RcvUrg1_MSB 0x21
+#define QIB_7322_IntStatus_RcvUrg1_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg0_LSB 0x20
+#define QIB_7322_IntStatus_RcvUrg0_MSB 0x20
+#define QIB_7322_IntStatus_RcvUrg0_RMASK 0x1
+#define QIB_7322_IntStatus_Err_1_LSB 0x1F
+#define QIB_7322_IntStatus_Err_1_MSB 0x1F
+#define QIB_7322_IntStatus_Err_1_RMASK 0x1
+#define QIB_7322_IntStatus_Err_0_LSB 0x1E
+#define QIB_7322_IntStatus_Err_0_MSB 0x1E
+#define QIB_7322_IntStatus_Err_0_RMASK 0x1
+#define QIB_7322_IntStatus_Err_LSB 0x1D
+#define QIB_7322_IntStatus_Err_MSB 0x1D
+#define QIB_7322_IntStatus_Err_RMASK 0x1
+#define QIB_7322_IntStatus_AssertGPIO_LSB 0x1C
+#define QIB_7322_IntStatus_AssertGPIO_MSB 0x1C
+#define QIB_7322_IntStatus_AssertGPIO_RMASK 0x1
+#define QIB_7322_IntStatus_SendDone_1_LSB 0x19
+#define QIB_7322_IntStatus_SendDone_1_MSB 0x19
+#define QIB_7322_IntStatus_SendDone_1_RMASK 0x1
+#define QIB_7322_IntStatus_SendDone_0_LSB 0x18
+#define QIB_7322_IntStatus_SendDone_0_MSB 0x18
+#define QIB_7322_IntStatus_SendDone_0_RMASK 0x1
+#define QIB_7322_IntStatus_SendBufAvail_LSB 0x17
+#define QIB_7322_IntStatus_SendBufAvail_MSB 0x17
+#define QIB_7322_IntStatus_SendBufAvail_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail17_LSB 0x11
+#define QIB_7322_IntStatus_RcvAvail17_MSB 0x11
+#define QIB_7322_IntStatus_RcvAvail17_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail16_LSB 0x10
+#define QIB_7322_IntStatus_RcvAvail16_MSB 0x10
+#define QIB_7322_IntStatus_RcvAvail16_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail15_LSB 0xF
+#define QIB_7322_IntStatus_RcvAvail15_MSB 0xF
+#define QIB_7322_IntStatus_RcvAvail15_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail14_LSB 0xE
+#define QIB_7322_IntStatus_RcvAvail14_MSB 0xE
+#define QIB_7322_IntStatus_RcvAvail14_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail13_LSB 0xD
+#define QIB_7322_IntStatus_RcvAvail13_MSB 0xD
+#define QIB_7322_IntStatus_RcvAvail13_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail12_LSB 0xC
+#define QIB_7322_IntStatus_RcvAvail12_MSB 0xC
+#define QIB_7322_IntStatus_RcvAvail12_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail11_LSB 0xB
+#define QIB_7322_IntStatus_RcvAvail11_MSB 0xB
+#define QIB_7322_IntStatus_RcvAvail11_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail10_LSB 0xA
+#define QIB_7322_IntStatus_RcvAvail10_MSB 0xA
+#define QIB_7322_IntStatus_RcvAvail10_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail9_LSB 0x9
+#define QIB_7322_IntStatus_RcvAvail9_MSB 0x9
+#define QIB_7322_IntStatus_RcvAvail9_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail8_LSB 0x8
+#define QIB_7322_IntStatus_RcvAvail8_MSB 0x8
+#define QIB_7322_IntStatus_RcvAvail8_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail7_LSB 0x7
+#define QIB_7322_IntStatus_RcvAvail7_MSB 0x7
+#define QIB_7322_IntStatus_RcvAvail7_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail6_LSB 0x6
+#define QIB_7322_IntStatus_RcvAvail6_MSB 0x6
+#define QIB_7322_IntStatus_RcvAvail6_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail5_LSB 0x5
+#define QIB_7322_IntStatus_RcvAvail5_MSB 0x5
+#define QIB_7322_IntStatus_RcvAvail5_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail4_LSB 0x4
+#define QIB_7322_IntStatus_RcvAvail4_MSB 0x4
+#define QIB_7322_IntStatus_RcvAvail4_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail3_LSB 0x3
+#define QIB_7322_IntStatus_RcvAvail3_MSB 0x3
+#define QIB_7322_IntStatus_RcvAvail3_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail2_LSB 0x2
+#define QIB_7322_IntStatus_RcvAvail2_MSB 0x2
+#define QIB_7322_IntStatus_RcvAvail2_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail1_LSB 0x1
+#define QIB_7322_IntStatus_RcvAvail1_MSB 0x1
+#define QIB_7322_IntStatus_RcvAvail1_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail0_LSB 0x0
+#define QIB_7322_IntStatus_RcvAvail0_MSB 0x0
+#define QIB_7322_IntStatus_RcvAvail0_RMASK 0x1
+
+#define QIB_7322_IntClear_OFFS 0x78
+#define QIB_7322_IntClear_DEF 0x0000000000000000
+#define QIB_7322_IntClear_SDmaIntClear_1_LSB 0x3F
+#define QIB_7322_IntClear_SDmaIntClear_1_MSB 0x3F
+#define QIB_7322_IntClear_SDmaIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SDmaIntClear_0_LSB 0x3E
+#define QIB_7322_IntClear_SDmaIntClear_0_MSB 0x3E
+#define QIB_7322_IntClear_SDmaIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_SDmaProgressIntClear_1_LSB 0x3D
+#define QIB_7322_IntClear_SDmaProgressIntClear_1_MSB 0x3D
+#define QIB_7322_IntClear_SDmaProgressIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SDmaProgressIntClear_0_LSB 0x3C
+#define QIB_7322_IntClear_SDmaProgressIntClear_0_MSB 0x3C
+#define QIB_7322_IntClear_SDmaProgressIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_SDmaIdleIntClear_1_LSB 0x3B
+#define QIB_7322_IntClear_SDmaIdleIntClear_1_MSB 0x3B
+#define QIB_7322_IntClear_SDmaIdleIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SDmaIdleIntClear_0_LSB 0x3A
+#define QIB_7322_IntClear_SDmaIdleIntClear_0_MSB 0x3A
+#define QIB_7322_IntClear_SDmaIdleIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_LSB 0x39
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_MSB 0x39
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_LSB 0x38
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_MSB 0x38
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg17IntClear_LSB 0x31
+#define QIB_7322_IntClear_RcvUrg17IntClear_MSB 0x31
+#define QIB_7322_IntClear_RcvUrg17IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg16IntClear_LSB 0x30
+#define QIB_7322_IntClear_RcvUrg16IntClear_MSB 0x30
+#define QIB_7322_IntClear_RcvUrg16IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg15IntClear_LSB 0x2F
+#define QIB_7322_IntClear_RcvUrg15IntClear_MSB 0x2F
+#define QIB_7322_IntClear_RcvUrg15IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg14IntClear_LSB 0x2E
+#define QIB_7322_IntClear_RcvUrg14IntClear_MSB 0x2E
+#define QIB_7322_IntClear_RcvUrg14IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg13IntClear_LSB 0x2D
+#define QIB_7322_IntClear_RcvUrg13IntClear_MSB 0x2D
+#define QIB_7322_IntClear_RcvUrg13IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg12IntClear_LSB 0x2C
+#define QIB_7322_IntClear_RcvUrg12IntClear_MSB 0x2C
+#define QIB_7322_IntClear_RcvUrg12IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg11IntClear_LSB 0x2B
+#define QIB_7322_IntClear_RcvUrg11IntClear_MSB 0x2B
+#define QIB_7322_IntClear_RcvUrg11IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg10IntClear_LSB 0x2A
+#define QIB_7322_IntClear_RcvUrg10IntClear_MSB 0x2A
+#define QIB_7322_IntClear_RcvUrg10IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg9IntClear_LSB 0x29
+#define QIB_7322_IntClear_RcvUrg9IntClear_MSB 0x29
+#define QIB_7322_IntClear_RcvUrg9IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg8IntClear_LSB 0x28
+#define QIB_7322_IntClear_RcvUrg8IntClear_MSB 0x28
+#define QIB_7322_IntClear_RcvUrg8IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg7IntClear_LSB 0x27
+#define QIB_7322_IntClear_RcvUrg7IntClear_MSB 0x27
+#define QIB_7322_IntClear_RcvUrg7IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg6IntClear_LSB 0x26
+#define QIB_7322_IntClear_RcvUrg6IntClear_MSB 0x26
+#define QIB_7322_IntClear_RcvUrg6IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg5IntClear_LSB 0x25
+#define QIB_7322_IntClear_RcvUrg5IntClear_MSB 0x25
+#define QIB_7322_IntClear_RcvUrg5IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg4IntClear_LSB 0x24
+#define QIB_7322_IntClear_RcvUrg4IntClear_MSB 0x24
+#define QIB_7322_IntClear_RcvUrg4IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg3IntClear_LSB 0x23
+#define QIB_7322_IntClear_RcvUrg3IntClear_MSB 0x23
+#define QIB_7322_IntClear_RcvUrg3IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg2IntClear_LSB 0x22
+#define QIB_7322_IntClear_RcvUrg2IntClear_MSB 0x22
+#define QIB_7322_IntClear_RcvUrg2IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg1IntClear_LSB 0x21
+#define QIB_7322_IntClear_RcvUrg1IntClear_MSB 0x21
+#define QIB_7322_IntClear_RcvUrg1IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg0IntClear_LSB 0x20
+#define QIB_7322_IntClear_RcvUrg0IntClear_MSB 0x20
+#define QIB_7322_IntClear_RcvUrg0IntClear_RMASK 0x1
+#define QIB_7322_IntClear_ErrIntClear_1_LSB 0x1F
+#define QIB_7322_IntClear_ErrIntClear_1_MSB 0x1F
+#define QIB_7322_IntClear_ErrIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_ErrIntClear_0_LSB 0x1E
+#define QIB_7322_IntClear_ErrIntClear_0_MSB 0x1E
+#define QIB_7322_IntClear_ErrIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_ErrIntClear_LSB 0x1D
+#define QIB_7322_IntClear_ErrIntClear_MSB 0x1D
+#define QIB_7322_IntClear_ErrIntClear_RMASK 0x1
+#define QIB_7322_IntClear_AssertGPIOIntClear_LSB 0x1C
+#define QIB_7322_IntClear_AssertGPIOIntClear_MSB 0x1C
+#define QIB_7322_IntClear_AssertGPIOIntClear_RMASK 0x1
+#define QIB_7322_IntClear_SendDoneIntClear_1_LSB 0x19
+#define QIB_7322_IntClear_SendDoneIntClear_1_MSB 0x19
+#define QIB_7322_IntClear_SendDoneIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SendDoneIntClear_0_LSB 0x18
+#define QIB_7322_IntClear_SendDoneIntClear_0_MSB 0x18
+#define QIB_7322_IntClear_SendDoneIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_SendBufAvailIntClear_LSB 0x17
+#define QIB_7322_IntClear_SendBufAvailIntClear_MSB 0x17
+#define QIB_7322_IntClear_SendBufAvailIntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail17IntClear_LSB 0x11
+#define QIB_7322_IntClear_RcvAvail17IntClear_MSB 0x11
+#define QIB_7322_IntClear_RcvAvail17IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail16IntClear_LSB 0x10
+#define QIB_7322_IntClear_RcvAvail16IntClear_MSB 0x10
+#define QIB_7322_IntClear_RcvAvail16IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail15IntClear_LSB 0xF
+#define QIB_7322_IntClear_RcvAvail15IntClear_MSB 0xF
+#define QIB_7322_IntClear_RcvAvail15IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail14IntClear_LSB 0xE
+#define QIB_7322_IntClear_RcvAvail14IntClear_MSB 0xE
+#define QIB_7322_IntClear_RcvAvail14IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail13IntClear_LSB 0xD
+#define QIB_7322_IntClear_RcvAvail13IntClear_MSB 0xD
+#define QIB_7322_IntClear_RcvAvail13IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail12IntClear_LSB 0xC
+#define QIB_7322_IntClear_RcvAvail12IntClear_MSB 0xC
+#define QIB_7322_IntClear_RcvAvail12IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail11IntClear_LSB 0xB
+#define QIB_7322_IntClear_RcvAvail11IntClear_MSB 0xB
+#define QIB_7322_IntClear_RcvAvail11IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail10IntClear_LSB 0xA
+#define QIB_7322_IntClear_RcvAvail10IntClear_MSB 0xA
+#define QIB_7322_IntClear_RcvAvail10IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail9IntClear_LSB 0x9
+#define QIB_7322_IntClear_RcvAvail9IntClear_MSB 0x9
+#define QIB_7322_IntClear_RcvAvail9IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail8IntClear_LSB 0x8
+#define QIB_7322_IntClear_RcvAvail8IntClear_MSB 0x8
+#define QIB_7322_IntClear_RcvAvail8IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail7IntClear_LSB 0x7
+#define QIB_7322_IntClear_RcvAvail7IntClear_MSB 0x7
+#define QIB_7322_IntClear_RcvAvail7IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail6IntClear_LSB 0x6
+#define QIB_7322_IntClear_RcvAvail6IntClear_MSB 0x6
+#define QIB_7322_IntClear_RcvAvail6IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail5IntClear_LSB 0x5
+#define QIB_7322_IntClear_RcvAvail5IntClear_MSB 0x5
+#define QIB_7322_IntClear_RcvAvail5IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail4IntClear_LSB 0x4
+#define QIB_7322_IntClear_RcvAvail4IntClear_MSB 0x4
+#define QIB_7322_IntClear_RcvAvail4IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail3IntClear_LSB 0x3
+#define QIB_7322_IntClear_RcvAvail3IntClear_MSB 0x3
+#define QIB_7322_IntClear_RcvAvail3IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail2IntClear_LSB 0x2
+#define QIB_7322_IntClear_RcvAvail2IntClear_MSB 0x2
+#define QIB_7322_IntClear_RcvAvail2IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail1IntClear_LSB 0x1
+#define QIB_7322_IntClear_RcvAvail1IntClear_MSB 0x1
+#define QIB_7322_IntClear_RcvAvail1IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail0IntClear_LSB 0x0
+#define QIB_7322_IntClear_RcvAvail0IntClear_MSB 0x0
+#define QIB_7322_IntClear_RcvAvail0IntClear_RMASK 0x1
+
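/*
 * Illustrative sketch only (not part of the generated header): a
 * single-bit field such as the IntClear bits above can be turned into a
 * register write value by shifting its _RMASK up to its _LSB position.
 * Whether a particular register acts as write-1-to-clear is a hardware
 * property of the chip; the macro and helper names here are hypothetical.
 */
#define QIB_7322_FIELD_MASK(reg, fld) \
	((u64)QIB_7322_##reg##_##fld##_RMASK << QIB_7322_##reg##_##fld##_LSB)

/* e.g. a value covering the RcvAvail0 and SendBufAvail interrupt bits */
static inline u64 qib_7322_intclear_example(void)
{
	return QIB_7322_FIELD_MASK(IntClear, RcvAvail0IntClear) |
	       QIB_7322_FIELD_MASK(IntClear, SendBufAvailIntClear);
}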
+#define QIB_7322_ErrMask_OFFS 0x80
+#define QIB_7322_ErrMask_DEF 0x0000000000000000
+#define QIB_7322_ErrMask_ResetNegatedMask_LSB 0x3F
+#define QIB_7322_ErrMask_ResetNegatedMask_MSB 0x3F
+#define QIB_7322_ErrMask_ResetNegatedMask_RMASK 0x1
+#define QIB_7322_ErrMask_HardwareErrMask_LSB 0x3E
+#define QIB_7322_ErrMask_HardwareErrMask_MSB 0x3E
+#define QIB_7322_ErrMask_HardwareErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_InvalidAddrErrMask_LSB 0x3D
+#define QIB_7322_ErrMask_InvalidAddrErrMask_MSB 0x3D
+#define QIB_7322_ErrMask_InvalidAddrErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SDmaVL15ErrMask_LSB 0x38
+#define QIB_7322_ErrMask_SDmaVL15ErrMask_MSB 0x38
+#define QIB_7322_ErrMask_SDmaVL15ErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_LSB 0x37
+#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_MSB 0x37
+#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_InvalidEEPCmdMask_LSB 0x35
+#define QIB_7322_ErrMask_InvalidEEPCmdMask_MSB 0x35
+#define QIB_7322_ErrMask_InvalidEEPCmdMask_RMASK 0x1
+#define QIB_7322_ErrMask_RcvContextShareErrMask_LSB 0x34
+#define QIB_7322_ErrMask_RcvContextShareErrMask_MSB 0x34
+#define QIB_7322_ErrMask_RcvContextShareErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SendVLMismatchErrMask_LSB 0x24
+#define QIB_7322_ErrMask_SendVLMismatchErrMask_MSB 0x24
+#define QIB_7322_ErrMask_SendVLMismatchErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SendArmLaunchErrMask_LSB 0x23
+#define QIB_7322_ErrMask_SendArmLaunchErrMask_MSB 0x23
+#define QIB_7322_ErrMask_SendArmLaunchErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
+#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_MSB 0x1B
+#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SDmaWrongPortErrMask_LSB 0x1A
+#define QIB_7322_ErrMask_SDmaWrongPortErrMask_MSB 0x1A
+#define QIB_7322_ErrMask_SDmaWrongPortErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_LSB 0x19
+#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_MSB 0x19
+#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_RcvHdrFullErrMask_LSB 0xD
+#define QIB_7322_ErrMask_RcvHdrFullErrMask_MSB 0xD
+#define QIB_7322_ErrMask_RcvHdrFullErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_RcvEgrFullErrMask_LSB 0xC
+#define QIB_7322_ErrMask_RcvEgrFullErrMask_MSB 0xC
+#define QIB_7322_ErrMask_RcvEgrFullErrMask_RMASK 0x1
+
+#define QIB_7322_ErrStatus_OFFS 0x88
+#define QIB_7322_ErrStatus_DEF 0x0000000000000000
+#define QIB_7322_ErrStatus_ResetNegated_LSB 0x3F
+#define QIB_7322_ErrStatus_ResetNegated_MSB 0x3F
+#define QIB_7322_ErrStatus_ResetNegated_RMASK 0x1
+#define QIB_7322_ErrStatus_HardwareErr_LSB 0x3E
+#define QIB_7322_ErrStatus_HardwareErr_MSB 0x3E
+#define QIB_7322_ErrStatus_HardwareErr_RMASK 0x1
+#define QIB_7322_ErrStatus_InvalidAddrErr_LSB 0x3D
+#define QIB_7322_ErrStatus_InvalidAddrErr_MSB 0x3D
+#define QIB_7322_ErrStatus_InvalidAddrErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SDmaVL15Err_LSB 0x38
+#define QIB_7322_ErrStatus_SDmaVL15Err_MSB 0x38
+#define QIB_7322_ErrStatus_SDmaVL15Err_RMASK 0x1
+#define QIB_7322_ErrStatus_SBufVL15MisUseErr_LSB 0x37
+#define QIB_7322_ErrStatus_SBufVL15MisUseErr_MSB 0x37
+#define QIB_7322_ErrStatus_SBufVL15MisUseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_InvalidEEPCmdErr_LSB 0x35
+#define QIB_7322_ErrStatus_InvalidEEPCmdErr_MSB 0x35
+#define QIB_7322_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
+#define QIB_7322_ErrStatus_RcvContextShareErr_LSB 0x34
+#define QIB_7322_ErrStatus_RcvContextShareErr_MSB 0x34
+#define QIB_7322_ErrStatus_RcvContextShareErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SendVLMismatchErr_LSB 0x24
+#define QIB_7322_ErrStatus_SendVLMismatchErr_MSB 0x24
+#define QIB_7322_ErrStatus_SendVLMismatchErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SendArmLaunchErr_LSB 0x23
+#define QIB_7322_ErrStatus_SendArmLaunchErr_MSB 0x23
+#define QIB_7322_ErrStatus_SendArmLaunchErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
+#define QIB_7322_ErrStatus_SendSpecialTriggerErr_MSB 0x1B
+#define QIB_7322_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SDmaWrongPortErr_LSB 0x1A
+#define QIB_7322_ErrStatus_SDmaWrongPortErr_MSB 0x1A
+#define QIB_7322_ErrStatus_SDmaWrongPortErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_LSB 0x19
+#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_MSB 0x19
+#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_RMASK 0x1
+#define QIB_7322_ErrStatus_RcvHdrFullErr_LSB 0xD
+#define QIB_7322_ErrStatus_RcvHdrFullErr_MSB 0xD
+#define QIB_7322_ErrStatus_RcvHdrFullErr_RMASK 0x1
+#define QIB_7322_ErrStatus_RcvEgrFullErr_LSB 0xC
+#define QIB_7322_ErrStatus_RcvEgrFullErr_MSB 0xC
+#define QIB_7322_ErrStatus_RcvEgrFullErr_RMASK 0x1
+
+#define QIB_7322_ErrClear_OFFS 0x90
+#define QIB_7322_ErrClear_DEF 0x0000000000000000
+#define QIB_7322_ErrClear_ResetNegatedClear_LSB 0x3F
+#define QIB_7322_ErrClear_ResetNegatedClear_MSB 0x3F
+#define QIB_7322_ErrClear_ResetNegatedClear_RMASK 0x1
+#define QIB_7322_ErrClear_HardwareErrClear_LSB 0x3E
+#define QIB_7322_ErrClear_HardwareErrClear_MSB 0x3E
+#define QIB_7322_ErrClear_HardwareErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_InvalidAddrErrClear_LSB 0x3D
+#define QIB_7322_ErrClear_InvalidAddrErrClear_MSB 0x3D
+#define QIB_7322_ErrClear_InvalidAddrErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SDmaVL15ErrClear_LSB 0x38
+#define QIB_7322_ErrClear_SDmaVL15ErrClear_MSB 0x38
+#define QIB_7322_ErrClear_SDmaVL15ErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_LSB 0x37
+#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_MSB 0x37
+#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
+#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_MSB 0x35
+#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_RcvContextShareErrClear_LSB 0x34
+#define QIB_7322_ErrClear_RcvContextShareErrClear_MSB 0x34
+#define QIB_7322_ErrClear_RcvContextShareErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SendVLMismatchErrMask_LSB 0x24
+#define QIB_7322_ErrClear_SendVLMismatchErrMask_MSB 0x24
+#define QIB_7322_ErrClear_SendVLMismatchErrMask_RMASK 0x1
+#define QIB_7322_ErrClear_SendArmLaunchErrClear_LSB 0x23
+#define QIB_7322_ErrClear_SendArmLaunchErrClear_MSB 0x23
+#define QIB_7322_ErrClear_SendArmLaunchErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
+#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_MSB 0x1B
+#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SDmaWrongPortErrClear_LSB 0x1A
+#define QIB_7322_ErrClear_SDmaWrongPortErrClear_MSB 0x1A
+#define QIB_7322_ErrClear_SDmaWrongPortErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_LSB 0x19
+#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_MSB 0x19
+#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_RcvHdrFullErrClear_LSB 0xD
+#define QIB_7322_ErrClear_RcvHdrFullErrClear_MSB 0xD
+#define QIB_7322_ErrClear_RcvHdrFullErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_RcvEgrFullErrClear_LSB 0xC
+#define QIB_7322_ErrClear_RcvEgrFullErrClear_MSB 0xC
+#define QIB_7322_ErrClear_RcvEgrFullErrClear_RMASK 0x1
+
+#define QIB_7322_HwErrMask_OFFS 0x98
+#define QIB_7322_HwErrMask_DEF 0x0000000000000000
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_LSB 0x3F
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_MSB 0x3F
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_LSB 0x3E
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_MSB 0x3E
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_LSB 0x37
+#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_MSB 0x37
+#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_RMASK 0x1
+#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
+#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_MSB 0x36
+#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
+#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_LSB 0x35
+#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_MSB 0x35
+#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_RMASK 0x1
+#define QIB_7322_HwErrMask_MemoryErrMask_LSB 0x30
+#define QIB_7322_HwErrMask_MemoryErrMask_MSB 0x30
+#define QIB_7322_HwErrMask_MemoryErrMask_RMASK 0x1
+#define QIB_7322_HwErrMask_pcie_phy_txParityErr_LSB 0x22
+#define QIB_7322_HwErrMask_pcie_phy_txParityErr_MSB 0x22
+#define QIB_7322_HwErrMask_pcie_phy_txParityErr_RMASK 0x1
+#define QIB_7322_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
+#define QIB_7322_HwErrMask_PCIeBusParityErrMask_MSB 0x21
+#define QIB_7322_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
+#define QIB_7322_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
+#define QIB_7322_HwErrMask_PcieCplTimeoutMask_MSB 0x1E
+#define QIB_7322_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
+#define QIB_7322_HwErrMask_PciePoisonedTLPMask_LSB 0x1D
+#define QIB_7322_HwErrMask_PciePoisonedTLPMask_MSB 0x1D
+#define QIB_7322_HwErrMask_PciePoisonedTLPMask_RMASK 0x1
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_LSB 0x1C
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_MSB 0x1C
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_LSB 0x1B
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_MSB 0x1B
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_LSB 0xF
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_MSB 0xF
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_statusValidNoEopMask_1_LSB 0xE
+#define QIB_7322_HwErrMask_statusValidNoEopMask_1_MSB 0xE
+#define QIB_7322_HwErrMask_statusValidNoEopMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_LSB 0xD
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_MSB 0xD
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_statusValidNoEopMask_0_LSB 0xC
+#define QIB_7322_HwErrMask_statusValidNoEopMask_0_MSB 0xC
+#define QIB_7322_HwErrMask_statusValidNoEopMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_LATriggeredMask_LSB 0xB
+#define QIB_7322_HwErrMask_LATriggeredMask_MSB 0xB
+#define QIB_7322_HwErrMask_LATriggeredMask_RMASK 0x1
+
+#define QIB_7322_HwErrStatus_OFFS 0xA0
+#define QIB_7322_HwErrStatus_DEF 0x0000000000000000
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_LSB 0x3F
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_MSB 0x3F
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_LSB 0x3E
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_MSB 0x3E
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_LSB 0x37
+#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_MSB 0x37
+#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_RMASK 0x1
+#define QIB_7322_HwErrStatus_PowerOnBISTFailed_LSB 0x36
+#define QIB_7322_HwErrStatus_PowerOnBISTFailed_MSB 0x36
+#define QIB_7322_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
+#define QIB_7322_HwErrStatus_TempsenseTholdReached_LSB 0x35
+#define QIB_7322_HwErrStatus_TempsenseTholdReached_MSB 0x35
+#define QIB_7322_HwErrStatus_TempsenseTholdReached_RMASK 0x1
+#define QIB_7322_HwErrStatus_MemoryErr_LSB 0x30
+#define QIB_7322_HwErrStatus_MemoryErr_MSB 0x30
+#define QIB_7322_HwErrStatus_MemoryErr_RMASK 0x1
+#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_LSB 0x22
+#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_MSB 0x22
+#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_RMASK 0x1
+#define QIB_7322_HwErrStatus_PCIeBusParity_LSB 0x1F
+#define QIB_7322_HwErrStatus_PCIeBusParity_MSB 0x21
+#define QIB_7322_HwErrStatus_PCIeBusParity_RMASK 0x7
+#define QIB_7322_HwErrStatus_PcieCplTimeout_LSB 0x1E
+#define QIB_7322_HwErrStatus_PcieCplTimeout_MSB 0x1E
+#define QIB_7322_HwErrStatus_PcieCplTimeout_RMASK 0x1
+#define QIB_7322_HwErrStatus_PciePoisonedTLP_LSB 0x1D
+#define QIB_7322_HwErrStatus_PciePoisonedTLP_MSB 0x1D
+#define QIB_7322_HwErrStatus_PciePoisonedTLP_RMASK 0x1
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_LSB 0x1C
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_MSB 0x1C
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_LSB 0x1B
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_MSB 0x1B
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_LSB 0xF
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_MSB 0xF
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_statusValidNoEop_1_LSB 0xE
+#define QIB_7322_HwErrStatus_statusValidNoEop_1_MSB 0xE
+#define QIB_7322_HwErrStatus_statusValidNoEop_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_LSB 0xD
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_MSB 0xD
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_statusValidNoEop_0_LSB 0xC
+#define QIB_7322_HwErrStatus_statusValidNoEop_0_MSB 0xC
+#define QIB_7322_HwErrStatus_statusValidNoEop_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_LATriggered_LSB 0xB
+#define QIB_7322_HwErrStatus_LATriggered_MSB 0xB
+#define QIB_7322_HwErrStatus_LATriggered_RMASK 0x1
+
+#define QIB_7322_HwErrClear_OFFS 0xA8
+#define QIB_7322_HwErrClear_DEF 0x0000000000000000
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_LSB 0x3F
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_MSB 0x3F
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_LSB 0x3E
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_MSB 0x3E
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_LSB 0x37
+#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_MSB 0x37
+#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_RMASK 0x1
+#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
+#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_MSB 0x36
+#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
+#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_LSB 0x35
+#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_MSB 0x35
+#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_RMASK 0x1
+#define QIB_7322_HwErrClear_MemoryErrClear_LSB 0x30
+#define QIB_7322_HwErrClear_MemoryErrClear_MSB 0x30
+#define QIB_7322_HwErrClear_MemoryErrClear_RMASK 0x1
+#define QIB_7322_HwErrClear_pcie_phy_txParityErr_LSB 0x22
+#define QIB_7322_HwErrClear_pcie_phy_txParityErr_MSB 0x22
+#define QIB_7322_HwErrClear_pcie_phy_txParityErr_RMASK 0x1
+#define QIB_7322_HwErrClear_PCIeBusParityClear_LSB 0x1F
+#define QIB_7322_HwErrClear_PCIeBusParityClear_MSB 0x21
+#define QIB_7322_HwErrClear_PCIeBusParityClear_RMASK 0x7
+#define QIB_7322_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
+#define QIB_7322_HwErrClear_PcieCplTimeoutClear_MSB 0x1E
+#define QIB_7322_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
+#define QIB_7322_HwErrClear_PciePoisonedTLPClear_LSB 0x1D
+#define QIB_7322_HwErrClear_PciePoisonedTLPClear_MSB 0x1D
+#define QIB_7322_HwErrClear_PciePoisonedTLPClear_RMASK 0x1
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_LSB 0x1C
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_MSB 0x1C
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_LSB 0x1B
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_MSB 0x1B
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_LSB 0xF
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_MSB 0xF
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_LSB 0xE
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_MSB 0xE
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_LSB 0xD
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_MSB 0xD
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_LSB 0xC
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_MSB 0xC
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_LATriggeredClear_LSB 0xB
+#define QIB_7322_HwErrClear_LATriggeredClear_MSB 0xB
+#define QIB_7322_HwErrClear_LATriggeredClear_RMASK 0x1
+
+#define QIB_7322_HwDiagCtrl_OFFS 0xB0
+#define QIB_7322_HwDiagCtrl_DEF 0x0000000000000000
+#define QIB_7322_HwDiagCtrl_Diagnostic_LSB 0x3F
+#define QIB_7322_HwDiagCtrl_Diagnostic_MSB 0x3F
+#define QIB_7322_HwDiagCtrl_Diagnostic_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_CounterWrEnable_LSB 0x3D
+#define QIB_7322_HwDiagCtrl_CounterWrEnable_MSB 0x3D
+#define QIB_7322_HwDiagCtrl_CounterWrEnable_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_CounterDisable_LSB 0x3C
+#define QIB_7322_HwDiagCtrl_CounterDisable_MSB 0x3C
+#define QIB_7322_HwDiagCtrl_CounterDisable_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
+#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_MSB 0x22
+#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_LSB 0xF
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_MSB 0xF
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_LSB 0xE
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_MSB 0xE
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_LSB 0xD
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_MSB 0xD
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_LSB 0xC
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_MSB 0xC
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_RMASK 0x1
+
+#define QIB_7322_EXTStatus_OFFS 0xC0
+#define QIB_7322_EXTStatus_DEF 0x000000000000X000
+#define QIB_7322_EXTStatus_GPIOIn_LSB 0x30
+#define QIB_7322_EXTStatus_GPIOIn_MSB 0x3F
+#define QIB_7322_EXTStatus_GPIOIn_RMASK 0xFFFF
+#define QIB_7322_EXTStatus_MemBISTDisabled_LSB 0xF
+#define QIB_7322_EXTStatus_MemBISTDisabled_MSB 0xF
+#define QIB_7322_EXTStatus_MemBISTDisabled_RMASK 0x1
+#define QIB_7322_EXTStatus_MemBISTEndTest_LSB 0xE
+#define QIB_7322_EXTStatus_MemBISTEndTest_MSB 0xE
+#define QIB_7322_EXTStatus_MemBISTEndTest_RMASK 0x1
+
+#define QIB_7322_EXTCtrl_OFFS 0xC8
+#define QIB_7322_EXTCtrl_DEF 0x0000000000000000
+#define QIB_7322_EXTCtrl_GPIOOe_LSB 0x30
+#define QIB_7322_EXTCtrl_GPIOOe_MSB 0x3F
+#define QIB_7322_EXTCtrl_GPIOOe_RMASK 0xFFFF
+#define QIB_7322_EXTCtrl_GPIOInvert_LSB 0x20
+#define QIB_7322_EXTCtrl_GPIOInvert_MSB 0x2F
+#define QIB_7322_EXTCtrl_GPIOInvert_RMASK 0xFFFF
+#define QIB_7322_EXTCtrl_LEDPort1GreenOn_LSB 0x3
+#define QIB_7322_EXTCtrl_LEDPort1GreenOn_MSB 0x3
+#define QIB_7322_EXTCtrl_LEDPort1GreenOn_RMASK 0x1
+#define QIB_7322_EXTCtrl_LEDPort1YellowOn_LSB 0x2
+#define QIB_7322_EXTCtrl_LEDPort1YellowOn_MSB 0x2
+#define QIB_7322_EXTCtrl_LEDPort1YellowOn_RMASK 0x1
+#define QIB_7322_EXTCtrl_LEDPort0GreenOn_LSB 0x1
+#define QIB_7322_EXTCtrl_LEDPort0GreenOn_MSB 0x1
+#define QIB_7322_EXTCtrl_LEDPort0GreenOn_RMASK 0x1
+#define QIB_7322_EXTCtrl_LEDPort0YellowOn_LSB 0x0
+#define QIB_7322_EXTCtrl_LEDPort0YellowOn_MSB 0x0
+#define QIB_7322_EXTCtrl_LEDPort0YellowOn_RMASK 0x1
+
+#define QIB_7322_GPIOOut_OFFS 0xE0
+#define QIB_7322_GPIOOut_DEF 0x0000000000000000
+
+#define QIB_7322_GPIOMask_OFFS 0xE8
+#define QIB_7322_GPIOMask_DEF 0x0000000000000000
+
+#define QIB_7322_GPIOStatus_OFFS 0xF0
+#define QIB_7322_GPIOStatus_DEF 0x0000000000000000
+
+#define QIB_7322_GPIOClear_OFFS 0xF8
+#define QIB_7322_GPIOClear_DEF 0x0000000000000000
+
+#define QIB_7322_RcvCtrl_OFFS 0x100
+#define QIB_7322_RcvCtrl_DEF 0x0000000000000000
+#define QIB_7322_RcvCtrl_TidReDirect_LSB 0x30
+#define QIB_7322_RcvCtrl_TidReDirect_MSB 0x3F
+#define QIB_7322_RcvCtrl_TidReDirect_RMASK 0xFFFF
+#define QIB_7322_RcvCtrl_TailUpd_LSB 0x2F
+#define QIB_7322_RcvCtrl_TailUpd_MSB 0x2F
+#define QIB_7322_RcvCtrl_TailUpd_RMASK 0x1
+#define QIB_7322_RcvCtrl_XrcTypeCode_LSB 0x2C
+#define QIB_7322_RcvCtrl_XrcTypeCode_MSB 0x2E
+#define QIB_7322_RcvCtrl_XrcTypeCode_RMASK 0x7
+#define QIB_7322_RcvCtrl_TidFlowEnable_LSB 0x2B
+#define QIB_7322_RcvCtrl_TidFlowEnable_MSB 0x2B
+#define QIB_7322_RcvCtrl_TidFlowEnable_RMASK 0x1
+#define QIB_7322_RcvCtrl_ContextCfg_LSB 0x29
+#define QIB_7322_RcvCtrl_ContextCfg_MSB 0x2A
+#define QIB_7322_RcvCtrl_ContextCfg_RMASK 0x3
+#define QIB_7322_RcvCtrl_IntrAvail_LSB 0x14
+#define QIB_7322_RcvCtrl_IntrAvail_MSB 0x25
+#define QIB_7322_RcvCtrl_IntrAvail_RMASK 0x3FFFF
+#define QIB_7322_RcvCtrl_dontDropRHQFull_LSB 0x0
+#define QIB_7322_RcvCtrl_dontDropRHQFull_MSB 0x11
+#define QIB_7322_RcvCtrl_dontDropRHQFull_RMASK 0x3FFFF
+
+#define QIB_7322_RcvHdrSize_OFFS 0x110
+#define QIB_7322_RcvHdrSize_DEF 0x0000000000000000
+
+#define QIB_7322_RcvHdrCnt_OFFS 0x118
+#define QIB_7322_RcvHdrCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RcvHdrEntSize_OFFS 0x120
+#define QIB_7322_RcvHdrEntSize_DEF 0x0000000000000000
+
+#define QIB_7322_RcvTIDBase_OFFS 0x128
+#define QIB_7322_RcvTIDBase_DEF 0x0000000000050000
+
+#define QIB_7322_RcvTIDCnt_OFFS 0x130
+#define QIB_7322_RcvTIDCnt_DEF 0x0000000000000200
+
+#define QIB_7322_RcvEgrBase_OFFS 0x138
+#define QIB_7322_RcvEgrBase_DEF 0x0000000000014000
+
+#define QIB_7322_RcvEgrCnt_OFFS 0x140
+#define QIB_7322_RcvEgrCnt_DEF 0x0000000000001000
+
+#define QIB_7322_RcvBufBase_OFFS 0x148
+#define QIB_7322_RcvBufBase_DEF 0x0000000000080000
+
+#define QIB_7322_RcvBufSize_OFFS 0x150
+#define QIB_7322_RcvBufSize_DEF 0x0000000000005000
+
+#define QIB_7322_RxIntMemBase_OFFS 0x158
+#define QIB_7322_RxIntMemBase_DEF 0x0000000000077000
+
+#define QIB_7322_RxIntMemSize_OFFS 0x160
+#define QIB_7322_RxIntMemSize_DEF 0x0000000000007000
+
+#define QIB_7322_feature_mask_OFFS 0x190
+#define QIB_7322_feature_mask_DEF 0x00000000000000XX
+
+#define QIB_7322_active_feature_mask_OFFS 0x198
+#define QIB_7322_active_feature_mask_DEF 0x00000000000000XX
+#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_LSB 0x5
+#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_MSB 0x5
+#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_LSB 0x4
+#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_MSB 0x4
+#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_LSB 0x3
+#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_MSB 0x3
+#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_LSB 0x2
+#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_MSB 0x2
+#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_LSB 0x1
+#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_MSB 0x1
+#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_LSB 0x0
+#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_MSB 0x0
+#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_RMASK 0x1
+
+#define QIB_7322_SendCtrl_OFFS 0x1C0
+#define QIB_7322_SendCtrl_DEF 0x0000000000000000
+#define QIB_7322_SendCtrl_Disarm_LSB 0x1F
+#define QIB_7322_SendCtrl_Disarm_MSB 0x1F
+#define QIB_7322_SendCtrl_Disarm_RMASK 0x1
+#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_LSB 0x1D
+#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_MSB 0x1D
+#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_RMASK 0x1
+#define QIB_7322_SendCtrl_AvailUpdThld_LSB 0x18
+#define QIB_7322_SendCtrl_AvailUpdThld_MSB 0x1C
+#define QIB_7322_SendCtrl_AvailUpdThld_RMASK 0x1F
+#define QIB_7322_SendCtrl_DisarmSendBuf_LSB 0x10
+#define QIB_7322_SendCtrl_DisarmSendBuf_MSB 0x17
+#define QIB_7322_SendCtrl_DisarmSendBuf_RMASK 0xFF
+#define QIB_7322_SendCtrl_SpecialTriggerEn_LSB 0x4
+#define QIB_7322_SendCtrl_SpecialTriggerEn_MSB 0x4
+#define QIB_7322_SendCtrl_SpecialTriggerEn_RMASK 0x1
+#define QIB_7322_SendCtrl_SendBufAvailUpd_LSB 0x2
+#define QIB_7322_SendCtrl_SendBufAvailUpd_MSB 0x2
+#define QIB_7322_SendCtrl_SendBufAvailUpd_RMASK 0x1
+#define QIB_7322_SendCtrl_SendIntBufAvail_LSB 0x1
+#define QIB_7322_SendCtrl_SendIntBufAvail_MSB 0x1
+#define QIB_7322_SendCtrl_SendIntBufAvail_RMASK 0x1
+
+#define QIB_7322_SendBufBase_OFFS 0x1C8
+#define QIB_7322_SendBufBase_DEF 0x0018000000100000
+#define QIB_7322_SendBufBase_BaseAddr_LargePIO_LSB 0x20
+#define QIB_7322_SendBufBase_BaseAddr_LargePIO_MSB 0x34
+#define QIB_7322_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
+#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
+#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_MSB 0x14
+#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
+
+#define QIB_7322_SendBufSize_OFFS 0x1D0
+#define QIB_7322_SendBufSize_DEF 0x0000108000000880
+#define QIB_7322_SendBufSize_Size_LargePIO_LSB 0x20
+#define QIB_7322_SendBufSize_Size_LargePIO_MSB 0x2C
+#define QIB_7322_SendBufSize_Size_LargePIO_RMASK 0x1FFF
+#define QIB_7322_SendBufSize_Size_SmallPIO_LSB 0x0
+#define QIB_7322_SendBufSize_Size_SmallPIO_MSB 0xB
+#define QIB_7322_SendBufSize_Size_SmallPIO_RMASK 0xFFF
+
+#define QIB_7322_SendBufCnt_OFFS 0x1D8
+#define QIB_7322_SendBufCnt_DEF 0x0000002000000080
+#define QIB_7322_SendBufCnt_Num_LargeBuffers_LSB 0x20
+#define QIB_7322_SendBufCnt_Num_LargeBuffers_MSB 0x25
+#define QIB_7322_SendBufCnt_Num_LargeBuffers_RMASK 0x3F
+#define QIB_7322_SendBufCnt_Num_SmallBuffers_LSB 0x0
+#define QIB_7322_SendBufCnt_Num_SmallBuffers_MSB 0x8
+#define QIB_7322_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF
+
+#define QIB_7322_SendBufAvailAddr_OFFS 0x1E0
+#define QIB_7322_SendBufAvailAddr_DEF 0x0000000000000000
+#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
+#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_MSB 0x27
+#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF
+
+#define QIB_7322_SendBufErr0_OFFS 0x240
+#define QIB_7322_SendBufErr0_DEF 0x0000000000000000
+#define QIB_7322_SendBufErr0_SendBufErr_63_0_LSB 0x0
+#define QIB_7322_SendBufErr0_SendBufErr_63_0_MSB 0x3F
+#define QIB_7322_SendBufErr0_SendBufErr_63_0_RMASK 0x0
+
+#define QIB_7322_AvailUpdCount_OFFS 0x268
+#define QIB_7322_AvailUpdCount_DEF 0x0000000000000000
+#define QIB_7322_AvailUpdCount_AvailUpdCount_LSB 0x0
+#define QIB_7322_AvailUpdCount_AvailUpdCount_MSB 0x4
+#define QIB_7322_AvailUpdCount_AvailUpdCount_RMASK 0x1F
+
+#define QIB_7322_RcvHdrAddr0_OFFS 0x280
+#define QIB_7322_RcvHdrAddr0_DEF 0x0000000000000000
+#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_LSB 0x2
+#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_MSB 0x27
+#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_RMASK 0x3FFFFFFFFF
+
+#define QIB_7322_RcvHdrTailAddr0_OFFS 0x340
+#define QIB_7322_RcvHdrTailAddr0_DEF 0x0000000000000000
+#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_LSB 0x2
+#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_MSB 0x27
+#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_RMASK 0x3FFFFFFFFF
+
+#define QIB_7322_ahb_access_ctrl_OFFS 0x460
+#define QIB_7322_ahb_access_ctrl_DEF 0x0000000000000000
+#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_LSB 0x1
+#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_MSB 0x2
+#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_RMASK 0x3
+#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_LSB 0x0
+#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_MSB 0x0
+#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_RMASK 0x1
+
+#define QIB_7322_ahb_transaction_reg_OFFS 0x468
+#define QIB_7322_ahb_transaction_reg_DEF 0x0000000080000000
+#define QIB_7322_ahb_transaction_reg_ahb_data_LSB 0x20
+#define QIB_7322_ahb_transaction_reg_ahb_data_MSB 0x3F
+#define QIB_7322_ahb_transaction_reg_ahb_data_RMASK 0xFFFFFFFF
+#define QIB_7322_ahb_transaction_reg_ahb_rdy_LSB 0x1F
+#define QIB_7322_ahb_transaction_reg_ahb_rdy_MSB 0x1F
+#define QIB_7322_ahb_transaction_reg_ahb_rdy_RMASK 0x1
+#define QIB_7322_ahb_transaction_reg_ahb_req_err_LSB 0x1E
+#define QIB_7322_ahb_transaction_reg_ahb_req_err_MSB 0x1E
+#define QIB_7322_ahb_transaction_reg_ahb_req_err_RMASK 0x1
+#define QIB_7322_ahb_transaction_reg_write_not_read_LSB 0x1B
+#define QIB_7322_ahb_transaction_reg_write_not_read_MSB 0x1B
+#define QIB_7322_ahb_transaction_reg_write_not_read_RMASK 0x1
+#define QIB_7322_ahb_transaction_reg_ahb_address_LSB 0x10
+#define QIB_7322_ahb_transaction_reg_ahb_address_MSB 0x1A
+#define QIB_7322_ahb_transaction_reg_ahb_address_RMASK 0x7FF
+
+#define QIB_7322_SPC_JTAG_ACCESS_REG_OFFS 0x470
+#define QIB_7322_SPC_JTAG_ACCESS_REG_DEF 0x0000000000000001
+#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_LSB 0xA
+#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_MSB 0xA
+#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_RMASK 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_LSB 0x5
+#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_MSB 0x9
+#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_RMASK 0x1F
+#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_LSB 0x3
+#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_MSB 0x4
+#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_RMASK 0x3
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_LSB 0x2
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_MSB 0x2
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_RMASK 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_LSB 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_MSB 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_RMASK 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_LSB 0x0
+#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_MSB 0x0
+#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_RMASK 0x1
+
+#define QIB_7322_SendCheckMask0_OFFS 0x4C0
+#define QIB_7322_SendCheckMask0_DEF 0x0000000000000000
+#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_LSB 0x0
+#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_MSB 0x3F
+#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_RMASK 0x0
+
+#define QIB_7322_SendGRHCheckMask0_OFFS 0x4E0
+#define QIB_7322_SendGRHCheckMask0_DEF 0x0000000000000000
+#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_LSB 0x0
+#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_MSB 0x3F
+#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_RMASK 0x0
+
+#define QIB_7322_SendIBPacketMask0_OFFS 0x500
+#define QIB_7322_SendIBPacketMask0_DEF 0x0000000000000000
+#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_LSB 0x0
+#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_MSB 0x3F
+#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_RMASK 0x0
+
+#define QIB_7322_IntRedirect0_OFFS 0x540
+#define QIB_7322_IntRedirect0_DEF 0x0000000000000000
+#define QIB_7322_IntRedirect0_vec11_LSB 0x37
+#define QIB_7322_IntRedirect0_vec11_MSB 0x3B
+#define QIB_7322_IntRedirect0_vec11_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec10_LSB 0x32
+#define QIB_7322_IntRedirect0_vec10_MSB 0x36
+#define QIB_7322_IntRedirect0_vec10_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec9_LSB 0x2D
+#define QIB_7322_IntRedirect0_vec9_MSB 0x31
+#define QIB_7322_IntRedirect0_vec9_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec8_LSB 0x28
+#define QIB_7322_IntRedirect0_vec8_MSB 0x2C
+#define QIB_7322_IntRedirect0_vec8_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec7_LSB 0x23
+#define QIB_7322_IntRedirect0_vec7_MSB 0x27
+#define QIB_7322_IntRedirect0_vec7_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec6_LSB 0x1E
+#define QIB_7322_IntRedirect0_vec6_MSB 0x22
+#define QIB_7322_IntRedirect0_vec6_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec5_LSB 0x19
+#define QIB_7322_IntRedirect0_vec5_MSB 0x1D
+#define QIB_7322_IntRedirect0_vec5_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec4_LSB 0x14
+#define QIB_7322_IntRedirect0_vec4_MSB 0x18
+#define QIB_7322_IntRedirect0_vec4_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec3_LSB 0xF
+#define QIB_7322_IntRedirect0_vec3_MSB 0x13
+#define QIB_7322_IntRedirect0_vec3_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec2_LSB 0xA
+#define QIB_7322_IntRedirect0_vec2_MSB 0xE
+#define QIB_7322_IntRedirect0_vec2_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec1_LSB 0x5
+#define QIB_7322_IntRedirect0_vec1_MSB 0x9
+#define QIB_7322_IntRedirect0_vec1_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec0_LSB 0x0
+#define QIB_7322_IntRedirect0_vec0_MSB 0x4
+#define QIB_7322_IntRedirect0_vec0_RMASK 0x1F
+
+#define QIB_7322_Int_Granted_OFFS 0x570
+#define QIB_7322_Int_Granted_DEF 0x0000000000000000
+
+#define QIB_7322_vec_clr_without_int_OFFS 0x578
+#define QIB_7322_vec_clr_without_int_DEF 0x0000000000000000
+
+#define QIB_7322_DCACtrlA_OFFS 0x580
+#define QIB_7322_DCACtrlA_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_LSB 0x4
+#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_MSB 0x4
+#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_RMASK 0x1
+#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_LSB 0x3
+#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_MSB 0x3
+#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_RMASK 0x1
+#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_LSB 0x2
+#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_MSB 0x2
+#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_RMASK 0x1
+#define QIB_7322_DCACtrlA_EagerDCAEnable_LSB 0x1
+#define QIB_7322_DCACtrlA_EagerDCAEnable_MSB 0x1
+#define QIB_7322_DCACtrlA_EagerDCAEnable_RMASK 0x1
+#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_LSB 0x0
+#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_MSB 0x0
+#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_RMASK 0x1
+
+#define QIB_7322_DCACtrlB_OFFS 0x588
+#define QIB_7322_DCACtrlB_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_LSB 0x36
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_MSB 0x3B
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_LSB 0x2E
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_MSB 0x35
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_LSB 0x28
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_MSB 0x2D
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_RMASK 0xFF
+
+#define QIB_7322_DCACtrlC_OFFS 0x590
+#define QIB_7322_DCACtrlC_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_LSB 0x36
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_MSB 0x3B
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_LSB 0x2E
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_MSB 0x35
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_LSB 0x28
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_MSB 0x2D
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_RMASK 0xFF
+
+#define QIB_7322_DCACtrlD_OFFS 0x598
+#define QIB_7322_DCACtrlD_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_LSB 0x36
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_MSB 0x3B
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_LSB 0x2E
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_MSB 0x35
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_LSB 0x28
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_MSB 0x2D
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_RMASK 0xFF
+
+#define QIB_7322_DCACtrlE_OFFS 0x5A0
+#define QIB_7322_DCACtrlE_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_LSB 0x36
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_MSB 0x3B
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_LSB 0x2E
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_MSB 0x35
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_LSB 0x28
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_MSB 0x2D
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_RMASK 0xFF
+
+#define QIB_7322_DCACtrlF_OFFS 0x5A8
+#define QIB_7322_DCACtrlF_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlF_SendDma1DCAOPH_LSB 0x28
+#define QIB_7322_DCACtrlF_SendDma1DCAOPH_MSB 0x2F
+#define QIB_7322_DCACtrlF_SendDma1DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlF_SendDma0DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlF_SendDma0DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlF_SendDma0DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_RMASK 0xFF
+
+#define QIB_7322_RcvAvailTimeOut0_OFFS 0xC00
+#define QIB_7322_RcvAvailTimeOut0_DEF 0x0000000000000000
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_LSB 0x10
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_MSB 0x1F
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_RMASK 0xFFFF
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_LSB 0x0
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_MSB 0xF
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_RMASK 0xFFFF
+
+#define QIB_7322_CntrRegBase_0_OFFS 0x1028
+#define QIB_7322_CntrRegBase_0_DEF 0x0000000000012000
+
+#define QIB_7322_ErrMask_0_OFFS 0x1080
+#define QIB_7322_ErrMask_0_DEF 0x0000000000000000
+#define QIB_7322_ErrMask_0_IBStatusChangedMask_LSB 0x3A
+#define QIB_7322_ErrMask_0_IBStatusChangedMask_MSB 0x3A
+#define QIB_7322_ErrMask_0_IBStatusChangedMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SHeadersErrMask_LSB 0x39
+#define QIB_7322_ErrMask_0_SHeadersErrMask_MSB 0x39
+#define QIB_7322_ErrMask_0_SHeadersErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_LSB 0x36
+#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_MSB 0x36
+#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaHaltErrMask_LSB 0x31
+#define QIB_7322_ErrMask_0_SDmaHaltErrMask_MSB 0x31
+#define QIB_7322_ErrMask_0_SDmaHaltErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_LSB 0x30
+#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_MSB 0x30
+#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_LSB 0x2F
+#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_MSB 0x2F
+#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_LSB 0x2E
+#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_MSB 0x2E
+#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_LSB 0x2D
+#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_MSB 0x2D
+#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_LSB 0x2C
+#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_MSB 0x2C
+#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDma1stDescErrMask_LSB 0x2B
+#define QIB_7322_ErrMask_0_SDma1stDescErrMask_MSB 0x2B
+#define QIB_7322_ErrMask_0_SDma1stDescErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaBaseErrMask_LSB 0x2A
+#define QIB_7322_ErrMask_0_SDmaBaseErrMask_MSB 0x2A
+#define QIB_7322_ErrMask_0_SDmaBaseErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_LSB 0x29
+#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_MSB 0x29
+#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_LSB 0x28
+#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_MSB 0x28
+#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_LSB 0x27
+#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_MSB 0x27
+#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_LSB 0x26
+#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_MSB 0x26
+#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_LSB 0x25
+#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_MSB 0x25
+#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_LSB 0x24
+#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_MSB 0x24
+#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_LSB 0x22
+#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_MSB 0x22
+#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_LSB 0x21
+#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_MSB 0x21
+#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendPktLenErrMask_LSB 0x20
+#define QIB_7322_ErrMask_0_SendPktLenErrMask_MSB 0x20
+#define QIB_7322_ErrMask_0_SendPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendUnderRunErrMask_LSB 0x1F
+#define QIB_7322_ErrMask_0_SendUnderRunErrMask_MSB 0x1F
+#define QIB_7322_ErrMask_0_SendUnderRunErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_LSB 0x1E
+#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_MSB 0x1E
+#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_LSB 0x1D
+#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_MSB 0x1D
+#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_LSB 0x11
+#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_MSB 0x11
+#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvHdrErrMask_LSB 0x10
+#define QIB_7322_ErrMask_0_RcvHdrErrMask_MSB 0x10
+#define QIB_7322_ErrMask_0_RcvHdrErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_LSB 0xF
+#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_MSB 0xF
+#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvBadTidErrMask_LSB 0xE
+#define QIB_7322_ErrMask_0_RcvBadTidErrMask_MSB 0xE
+#define QIB_7322_ErrMask_0_RcvBadTidErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_LSB 0xB
+#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_MSB 0xB
+#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_LSB 0xA
+#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_MSB 0xA
+#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvEBPErrMask_LSB 0x9
+#define QIB_7322_ErrMask_0_RcvEBPErrMask_MSB 0x9
+#define QIB_7322_ErrMask_0_RcvEBPErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_LSB 0x8
+#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_MSB 0x8
+#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_LSB 0x7
+#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_MSB 0x7
+#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_LSB 0x6
+#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_MSB 0x6
+#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_LSB 0x5
+#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_MSB 0x5
+#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_LSB 0x4
+#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_MSB 0x4
+#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_LSB 0x3
+#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_MSB 0x3
+#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvICRCErrMask_LSB 0x2
+#define QIB_7322_ErrMask_0_RcvICRCErrMask_MSB 0x2
+#define QIB_7322_ErrMask_0_RcvICRCErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvVCRCErrMask_LSB 0x1
+#define QIB_7322_ErrMask_0_RcvVCRCErrMask_MSB 0x1
+#define QIB_7322_ErrMask_0_RcvVCRCErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvFormatErrMask_LSB 0x0
+#define QIB_7322_ErrMask_0_RcvFormatErrMask_MSB 0x0
+#define QIB_7322_ErrMask_0_RcvFormatErrMask_RMASK 0x1
+
+#define QIB_7322_ErrStatus_0_OFFS 0x1088
+#define QIB_7322_ErrStatus_0_DEF 0x0000000000000000
+#define QIB_7322_ErrStatus_0_IBStatusChanged_LSB 0x3A
+#define QIB_7322_ErrStatus_0_IBStatusChanged_MSB 0x3A
+#define QIB_7322_ErrStatus_0_IBStatusChanged_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SHeadersErr_LSB 0x39
+#define QIB_7322_ErrStatus_0_SHeadersErr_MSB 0x39
+#define QIB_7322_ErrStatus_0_SHeadersErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_LSB 0x36
+#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_MSB 0x36
+#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaHaltErr_LSB 0x31
+#define QIB_7322_ErrStatus_0_SDmaHaltErr_MSB 0x31
+#define QIB_7322_ErrStatus_0_SDmaHaltErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_LSB 0x30
+#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_MSB 0x30
+#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_LSB 0x2F
+#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_MSB 0x2F
+#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_LSB 0x2E
+#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_MSB 0x2E
+#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaDwEnErr_LSB 0x2D
+#define QIB_7322_ErrStatus_0_SDmaDwEnErr_MSB 0x2D
+#define QIB_7322_ErrStatus_0_SDmaDwEnErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_LSB 0x2C
+#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_MSB 0x2C
+#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDma1stDescErr_LSB 0x2B
+#define QIB_7322_ErrStatus_0_SDma1stDescErr_MSB 0x2B
+#define QIB_7322_ErrStatus_0_SDma1stDescErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaBaseErr_LSB 0x2A
+#define QIB_7322_ErrStatus_0_SDmaBaseErr_MSB 0x2A
+#define QIB_7322_ErrStatus_0_SDmaBaseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_LSB 0x29
+#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_MSB 0x29
+#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_LSB 0x28
+#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_MSB 0x28
+#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_LSB 0x27
+#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_MSB 0x27
+#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendBufMisuseErr_LSB 0x26
+#define QIB_7322_ErrStatus_0_SendBufMisuseErr_MSB 0x26
+#define QIB_7322_ErrStatus_0_SendBufMisuseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_LSB 0x25
+#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_MSB 0x25
+#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_LSB 0x24
+#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_MSB 0x24
+#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_LSB 0x22
+#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_MSB 0x22
+#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_LSB 0x21
+#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_MSB 0x21
+#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendPktLenErr_LSB 0x20
+#define QIB_7322_ErrStatus_0_SendPktLenErr_MSB 0x20
+#define QIB_7322_ErrStatus_0_SendPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendUnderRunErr_LSB 0x1F
+#define QIB_7322_ErrStatus_0_SendUnderRunErr_MSB 0x1F
+#define QIB_7322_ErrStatus_0_SendUnderRunErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_LSB 0x1E
+#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_MSB 0x1E
+#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendMinPktLenErr_LSB 0x1D
+#define QIB_7322_ErrStatus_0_SendMinPktLenErr_MSB 0x1D
+#define QIB_7322_ErrStatus_0_SendMinPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_LSB 0x11
+#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_MSB 0x11
+#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvHdrErr_LSB 0x10
+#define QIB_7322_ErrStatus_0_RcvHdrErr_MSB 0x10
+#define QIB_7322_ErrStatus_0_RcvHdrErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvHdrLenErr_LSB 0xF
+#define QIB_7322_ErrStatus_0_RcvHdrLenErr_MSB 0xF
+#define QIB_7322_ErrStatus_0_RcvHdrLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvBadTidErr_LSB 0xE
+#define QIB_7322_ErrStatus_0_RcvBadTidErr_MSB 0xE
+#define QIB_7322_ErrStatus_0_RcvBadTidErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvBadVersionErr_LSB 0xB
+#define QIB_7322_ErrStatus_0_RcvBadVersionErr_MSB 0xB
+#define QIB_7322_ErrStatus_0_RcvBadVersionErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvIBFlowErr_LSB 0xA
+#define QIB_7322_ErrStatus_0_RcvIBFlowErr_MSB 0xA
+#define QIB_7322_ErrStatus_0_RcvIBFlowErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvEBPErr_LSB 0x9
+#define QIB_7322_ErrStatus_0_RcvEBPErr_MSB 0x9
+#define QIB_7322_ErrStatus_0_RcvEBPErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_LSB 0x8
+#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_MSB 0x8
+#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_LSB 0x7
+#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_MSB 0x7
+#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_LSB 0x6
+#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_MSB 0x6
+#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_LSB 0x5
+#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_MSB 0x5
+#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_LSB 0x4
+#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_MSB 0x4
+#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_LSB 0x3
+#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_MSB 0x3
+#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvICRCErr_LSB 0x2
+#define QIB_7322_ErrStatus_0_RcvICRCErr_MSB 0x2
+#define QIB_7322_ErrStatus_0_RcvICRCErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvVCRCErr_LSB 0x1
+#define QIB_7322_ErrStatus_0_RcvVCRCErr_MSB 0x1
+#define QIB_7322_ErrStatus_0_RcvVCRCErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvFormatErr_LSB 0x0
+#define QIB_7322_ErrStatus_0_RcvFormatErr_MSB 0x0
+#define QIB_7322_ErrStatus_0_RcvFormatErr_RMASK 0x1
+
+#define QIB_7322_ErrClear_0_OFFS 0x1090
+#define QIB_7322_ErrClear_0_DEF 0x0000000000000000
+#define QIB_7322_ErrClear_0_IBStatusChangedClear_LSB 0x3A
+#define QIB_7322_ErrClear_0_IBStatusChangedClear_MSB 0x3A
+#define QIB_7322_ErrClear_0_IBStatusChangedClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SHeadersErrClear_LSB 0x39
+#define QIB_7322_ErrClear_0_SHeadersErrClear_MSB 0x39
+#define QIB_7322_ErrClear_0_SHeadersErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_LSB 0x36
+#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_MSB 0x36
+#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaHaltErrClear_LSB 0x31
+#define QIB_7322_ErrClear_0_SDmaHaltErrClear_MSB 0x31
+#define QIB_7322_ErrClear_0_SDmaHaltErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_LSB 0x30
+#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_MSB 0x30
+#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_LSB 0x2F
+#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_MSB 0x2F
+#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_LSB 0x2E
+#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_MSB 0x2E
+#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_LSB 0x2D
+#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_MSB 0x2D
+#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_LSB 0x2C
+#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_MSB 0x2C
+#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDma1stDescErrClear_LSB 0x2B
+#define QIB_7322_ErrClear_0_SDma1stDescErrClear_MSB 0x2B
+#define QIB_7322_ErrClear_0_SDma1stDescErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaBaseErrClear_LSB 0x2A
+#define QIB_7322_ErrClear_0_SDmaBaseErrClear_MSB 0x2A
+#define QIB_7322_ErrClear_0_SDmaBaseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_LSB 0x29
+#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_MSB 0x29
+#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_LSB 0x28
+#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_MSB 0x28
+#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_LSB 0x27
+#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_MSB 0x27
+#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_LSB 0x26
+#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_MSB 0x26
+#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_LSB 0x25
+#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_MSB 0x25
+#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_LSB 0x24
+#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_MSB 0x24
+#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_LSB 0x22
+#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_MSB 0x22
+#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_LSB 0x21
+#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_MSB 0x21
+#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendPktLenErrClear_LSB 0x20
+#define QIB_7322_ErrClear_0_SendPktLenErrClear_MSB 0x20
+#define QIB_7322_ErrClear_0_SendPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendUnderRunErrClear_LSB 0x1F
+#define QIB_7322_ErrClear_0_SendUnderRunErrClear_MSB 0x1F
+#define QIB_7322_ErrClear_0_SendUnderRunErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_LSB 0x1E
+#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_MSB 0x1E
+#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_LSB 0x1D
+#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_MSB 0x1D
+#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_LSB 0x11
+#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_MSB 0x11
+#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvHdrErrClear_LSB 0x10
+#define QIB_7322_ErrClear_0_RcvHdrErrClear_MSB 0x10
+#define QIB_7322_ErrClear_0_RcvHdrErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_LSB 0xF
+#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_MSB 0xF
+#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvBadTidErrClear_LSB 0xE
+#define QIB_7322_ErrClear_0_RcvBadTidErrClear_MSB 0xE
+#define QIB_7322_ErrClear_0_RcvBadTidErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_LSB 0xB
+#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_MSB 0xB
+#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_LSB 0xA
+#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_MSB 0xA
+#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvEBPErrClear_LSB 0x9
+#define QIB_7322_ErrClear_0_RcvEBPErrClear_MSB 0x9
+#define QIB_7322_ErrClear_0_RcvEBPErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_LSB 0x8
+#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_MSB 0x8
+#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_LSB 0x7
+#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_MSB 0x7
+#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_LSB 0x6
+#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_MSB 0x6
+#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_LSB 0x5
+#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_MSB 0x5
+#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_LSB 0x4
+#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_MSB 0x4
+#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_LSB 0x3
+#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_MSB 0x3
+#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvICRCErrClear_LSB 0x2
+#define QIB_7322_ErrClear_0_RcvICRCErrClear_MSB 0x2
+#define QIB_7322_ErrClear_0_RcvICRCErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvVCRCErrClear_LSB 0x1
+#define QIB_7322_ErrClear_0_RcvVCRCErrClear_MSB 0x1
+#define QIB_7322_ErrClear_0_RcvVCRCErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvFormatErrClear_LSB 0x0
+#define QIB_7322_ErrClear_0_RcvFormatErrClear_MSB 0x0
+#define QIB_7322_ErrClear_0_RcvFormatErrClear_RMASK 0x1
+
+#define QIB_7322_TXEStatus_0_OFFS 0x10B8
+#define QIB_7322_TXEStatus_0_DEF 0x0000000XC00080FF
+#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_LSB 0x1F
+#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_MSB 0x1F
+#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_RMASK 0x1
+#define QIB_7322_TXEStatus_0_RmFifoEmpty_LSB 0x1E
+#define QIB_7322_TXEStatus_0_RmFifoEmpty_MSB 0x1E
+#define QIB_7322_TXEStatus_0_RmFifoEmpty_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_LSB 0xF
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_MSB 0xF
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_LSB 0x7
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_MSB 0x7
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_LSB 0x6
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_MSB 0x6
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_LSB 0x5
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_MSB 0x5
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_LSB 0x4
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_MSB 0x4
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_LSB 0x3
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_MSB 0x3
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_LSB 0x2
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_MSB 0x2
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_LSB 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_MSB 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_LSB 0x0
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_MSB 0x0
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_RMASK 0x1
+
+#define QIB_7322_RcvCtrl_0_OFFS 0x1100
+#define QIB_7322_RcvCtrl_0_DEF 0x0000000000000000
+#define QIB_7322_RcvCtrl_0_RcvResetCredit_LSB 0x2A
+#define QIB_7322_RcvCtrl_0_RcvResetCredit_MSB 0x2A
+#define QIB_7322_RcvCtrl_0_RcvResetCredit_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_LSB 0x29
+#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_MSB 0x29
+#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_LSB 0x28
+#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_MSB 0x28
+#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_LSB 0x27
+#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_MSB 0x27
+#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_ContextEnableUser_LSB 0x2
+#define QIB_7322_RcvCtrl_0_ContextEnableUser_MSB 0x11
+#define QIB_7322_RcvCtrl_0_ContextEnableUser_RMASK 0xFFFF
+#define QIB_7322_RcvCtrl_0_ContextEnableKernel_LSB 0x0
+#define QIB_7322_RcvCtrl_0_ContextEnableKernel_MSB 0x0
+#define QIB_7322_RcvCtrl_0_ContextEnableKernel_RMASK 0x1
+
+#define QIB_7322_RcvBTHQP_0_OFFS 0x1108
+#define QIB_7322_RcvBTHQP_0_DEF 0x0000000000000000
+#define QIB_7322_RcvBTHQP_0_RcvBTHQP_LSB 0x0
+#define QIB_7322_RcvBTHQP_0_RcvBTHQP_MSB 0x17
+#define QIB_7322_RcvBTHQP_0_RcvBTHQP_RMASK 0xFFFFFF
+
+#define QIB_7322_RcvQPMapTableA_0_OFFS 0x1110
+#define QIB_7322_RcvQPMapTableA_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_LSB 0x19
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_MSB 0x1D
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_LSB 0x14
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_MSB 0x18
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_LSB 0xF
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_MSB 0x13
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_LSB 0xA
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_MSB 0xE
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_LSB 0x5
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_MSB 0x9
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_LSB 0x0
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_MSB 0x4
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableB_0_OFFS 0x1118
+#define QIB_7322_RcvQPMapTableB_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_LSB 0x19
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_MSB 0x1D
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_LSB 0x14
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_MSB 0x18
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_LSB 0xF
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_MSB 0x13
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_LSB 0xA
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_MSB 0xE
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_LSB 0x5
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_MSB 0x9
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_LSB 0x0
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_MSB 0x4
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableC_0_OFFS 0x1120
+#define QIB_7322_RcvQPMapTableC_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_LSB 0x19
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_MSB 0x1D
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_LSB 0x14
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_MSB 0x18
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_LSB 0xF
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_MSB 0x13
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_LSB 0xA
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_MSB 0xE
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_LSB 0x5
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_MSB 0x9
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_LSB 0x0
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_MSB 0x4
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableD_0_OFFS 0x1128
+#define QIB_7322_RcvQPMapTableD_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_LSB 0x19
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_MSB 0x1D
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_LSB 0x14
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_MSB 0x18
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_LSB 0xF
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_MSB 0x13
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_LSB 0xA
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_MSB 0xE
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_LSB 0x5
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_MSB 0x9
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_LSB 0x0
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_MSB 0x4
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableE_0_OFFS 0x1130
+#define QIB_7322_RcvQPMapTableE_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_LSB 0x19
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_MSB 0x1D
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_LSB 0x14
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_MSB 0x18
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_LSB 0xF
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_MSB 0x13
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_LSB 0xA
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_MSB 0xE
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_LSB 0x5
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_MSB 0x9
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_LSB 0x0
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_MSB 0x4
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableF_0_OFFS 0x1138
+#define QIB_7322_RcvQPMapTableF_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_LSB 0x5
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_MSB 0x9
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_LSB 0x0
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_MSB 0x4
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_RMASK 0x1F
+
+#define QIB_7322_PSStat_0_OFFS 0x1140
+#define QIB_7322_PSStat_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSStart_0_OFFS 0x1148
+#define QIB_7322_PSStart_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSInterval_0_OFFS 0x1150
+#define QIB_7322_PSInterval_0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvStatus_0_OFFS 0x1160
+#define QIB_7322_RcvStatus_0_DEF 0x0000000000000000
+#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_LSB 0x1
+#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_MSB 0x5
+#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_RMASK 0x1F
+#define QIB_7322_RcvStatus_0_RxPktInProgress_LSB 0x0
+#define QIB_7322_RcvStatus_0_RxPktInProgress_MSB 0x0
+#define QIB_7322_RcvStatus_0_RxPktInProgress_RMASK 0x1
+
+#define QIB_7322_RcvPartitionKey_0_OFFS 0x1168
+#define QIB_7322_RcvPartitionKey_0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvQPMulticastContext_0_OFFS 0x1170
+#define QIB_7322_RcvQPMulticastContext_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_LSB 0x0
+#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_MSB 0x4
+#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_RMASK 0x1F
+
+#define QIB_7322_RcvPktLEDCnt_0_OFFS 0x1178
+#define QIB_7322_RcvPktLEDCnt_0_DEF 0x0000000000000000
+#define QIB_7322_RcvPktLEDCnt_0_ONperiod_LSB 0x20
+#define QIB_7322_RcvPktLEDCnt_0_ONperiod_MSB 0x3F
+#define QIB_7322_RcvPktLEDCnt_0_ONperiod_RMASK 0xFFFFFFFF
+#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_LSB 0x0
+#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_MSB 0x1F
+#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_RMASK 0xFFFFFFFF
+
+#define QIB_7322_SendDmaIdleCnt_0_OFFS 0x1180
+#define QIB_7322_SendDmaIdleCnt_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_LSB 0x0
+#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_MSB 0xF
+#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaReloadCnt_0_OFFS 0x1188
+#define QIB_7322_SendDmaReloadCnt_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_LSB 0x0
+#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_MSB 0xF
+#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaDescCnt_0_OFFS 0x1190
+#define QIB_7322_SendDmaDescCnt_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_LSB 0x0
+#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_MSB 0xF
+#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_RMASK 0xFFFF
+
+#define QIB_7322_SendCtrl_0_OFFS 0x11C0
+#define QIB_7322_SendCtrl_0_DEF 0x0000000000000000
+#define QIB_7322_SendCtrl_0_IBVLArbiterEn_LSB 0xF
+#define QIB_7322_SendCtrl_0_IBVLArbiterEn_MSB 0xF
+#define QIB_7322_SendCtrl_0_IBVLArbiterEn_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_LSB 0xE
+#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_MSB 0xE
+#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_LSB 0xD
+#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_MSB 0xD
+#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaHalt_LSB 0xC
+#define QIB_7322_SendCtrl_0_SDmaHalt_MSB 0xC
+#define QIB_7322_SendCtrl_0_SDmaHalt_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaEnable_LSB 0xB
+#define QIB_7322_SendCtrl_0_SDmaEnable_MSB 0xB
+#define QIB_7322_SendCtrl_0_SDmaEnable_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_LSB 0xA
+#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_MSB 0xA
+#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaIntEnable_LSB 0x9
+#define QIB_7322_SendCtrl_0_SDmaIntEnable_MSB 0x9
+#define QIB_7322_SendCtrl_0_SDmaIntEnable_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaCleanup_LSB 0x8
+#define QIB_7322_SendCtrl_0_SDmaCleanup_MSB 0x8
+#define QIB_7322_SendCtrl_0_SDmaCleanup_RMASK 0x1
+#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_LSB 0x7
+#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_MSB 0x7
+#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SendEnable_LSB 0x3
+#define QIB_7322_SendCtrl_0_SendEnable_MSB 0x3
+#define QIB_7322_SendCtrl_0_SendEnable_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeBypassIbc_LSB 0x1
+#define QIB_7322_SendCtrl_0_TxeBypassIbc_MSB 0x1
+#define QIB_7322_SendCtrl_0_TxeBypassIbc_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeAbortIbc_LSB 0x0
+#define QIB_7322_SendCtrl_0_TxeAbortIbc_MSB 0x0
+#define QIB_7322_SendCtrl_0_TxeAbortIbc_RMASK 0x1
+
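+/*
+ * Illustrative sketch only (not part of this header): a writable field
+ * such as SendCtrl_0.SDmaEnable would typically be updated with a
+ * read-modify-write built from the same _LSB/_RMASK pair, e.g.
+ *
+ *     u64 v = readq(kregbase + QIB_7322_SendCtrl_0_OFFS);
+ *     v &= ~((u64)QIB_7322_SendCtrl_0_SDmaEnable_RMASK
+ *            << QIB_7322_SendCtrl_0_SDmaEnable_LSB);      // clear the field
+ *     v |= (u64)1 << QIB_7322_SendCtrl_0_SDmaEnable_LSB;  // set new value
+ *     writeq(v, kregbase + QIB_7322_SendCtrl_0_OFFS);
+ *
+ * 'kregbase' is again an illustrative name for the ioremapped register
+ * base, not a symbol defined here.
+ */
+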
+#define QIB_7322_SendDmaBase_0_OFFS 0x11F8
+#define QIB_7322_SendDmaBase_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaBase_0_SendDmaBase_LSB 0x0
+#define QIB_7322_SendDmaBase_0_SendDmaBase_MSB 0x2F
+#define QIB_7322_SendDmaBase_0_SendDmaBase_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7322_SendDmaLenGen_0_OFFS 0x1200
+#define QIB_7322_SendDmaLenGen_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaLenGen_0_Generation_LSB 0x10
+#define QIB_7322_SendDmaLenGen_0_Generation_MSB 0x12
+#define QIB_7322_SendDmaLenGen_0_Generation_RMASK 0x7
+#define QIB_7322_SendDmaLenGen_0_Length_LSB 0x0
+#define QIB_7322_SendDmaLenGen_0_Length_MSB 0xF
+#define QIB_7322_SendDmaLenGen_0_Length_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaTail_0_OFFS 0x1208
+#define QIB_7322_SendDmaTail_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaTail_0_SendDmaTail_LSB 0x0
+#define QIB_7322_SendDmaTail_0_SendDmaTail_MSB 0xF
+#define QIB_7322_SendDmaTail_0_SendDmaTail_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaHead_0_OFFS 0x1210
+#define QIB_7322_SendDmaHead_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_LSB 0x20
+#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_MSB 0x2F
+#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_RMASK 0xFFFF
+#define QIB_7322_SendDmaHead_0_SendDmaHead_LSB 0x0
+#define QIB_7322_SendDmaHead_0_SendDmaHead_MSB 0xF
+#define QIB_7322_SendDmaHead_0_SendDmaHead_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaHeadAddr_0_OFFS 0x1218
+#define QIB_7322_SendDmaHeadAddr_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_LSB 0x0
+#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_MSB 0x2F
+#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7322_SendDmaBufMask0_0_OFFS 0x1220
+#define QIB_7322_SendDmaBufMask0_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_LSB 0x0
+#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_MSB 0x3F
+#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_RMASK 0x0
+
+#define QIB_7322_SendDmaStatus_0_OFFS 0x1238
+#define QIB_7322_SendDmaStatus_0_DEF 0x0000000042000000
+#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_LSB 0x3F
+#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_MSB 0x3F
+#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_HaltInProg_LSB 0x3E
+#define QIB_7322_SendDmaStatus_0_HaltInProg_MSB 0x3E
+#define QIB_7322_SendDmaStatus_0_HaltInProg_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_LSB 0x3D
+#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_MSB 0x3D
+#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_LSB 0x2F
+#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_MSB 0x3C
+#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_RMASK 0x3FFF
+#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_LSB 0x28
+#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_MSB 0x2E
+#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_RMASK 0x7F
+#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_LSB 0x20
+#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_MSB 0x27
+#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_RMASK 0xFF
+#define QIB_7322_SendDmaStatus_0_ScbFull_LSB 0x1F
+#define QIB_7322_SendDmaStatus_0_ScbFull_MSB 0x1F
+#define QIB_7322_SendDmaStatus_0_ScbFull_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbEmpty_LSB 0x1E
+#define QIB_7322_SendDmaStatus_0_ScbEmpty_MSB 0x1E
+#define QIB_7322_SendDmaStatus_0_ScbEmpty_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbEntryValid_LSB 0x1D
+#define QIB_7322_SendDmaStatus_0_ScbEntryValid_MSB 0x1D
+#define QIB_7322_SendDmaStatus_0_ScbEntryValid_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_LSB 0x1C
+#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_MSB 0x1C
+#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_LSB 0x1B
+#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_MSB 0x1B
+#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_LSB 0x1A
+#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_MSB 0x1A
+#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_LSB 0x19
+#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_MSB 0x19
+#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoFull_LSB 0x18
+#define QIB_7322_SendDmaStatus_0_SplFifoFull_MSB 0x18
+#define QIB_7322_SendDmaStatus_0_SplFifoFull_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_LSB 0x10
+#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_MSB 0x17
+#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_RMASK 0xFF
+#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_LSB 0x0
+#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_MSB 0xF
+#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaPriorityThld_0_OFFS 0x1258
+#define QIB_7322_SendDmaPriorityThld_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_LSB 0x0
+#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_MSB 0x3
+#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_RMASK 0xF
+
+#define QIB_7322_SendHdrErrSymptom_0_OFFS 0x1260
+#define QIB_7322_SendHdrErrSymptom_0_DEF 0x0000000000000000
+#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_LSB 0x6
+#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_MSB 0x6
+#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_GRHFail_LSB 0x5
+#define QIB_7322_SendHdrErrSymptom_0_GRHFail_MSB 0x5
+#define QIB_7322_SendHdrErrSymptom_0_GRHFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_LSB 0x4
+#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_MSB 0x4
+#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_QPFail_LSB 0x3
+#define QIB_7322_SendHdrErrSymptom_0_QPFail_MSB 0x3
+#define QIB_7322_SendHdrErrSymptom_0_QPFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_LSB 0x2
+#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_MSB 0x2
+#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_LSB 0x1
+#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_MSB 0x1
+#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_LSB 0x0
+#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_MSB 0x0
+#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_RMASK 0x1
+
+#define QIB_7322_RxCreditVL0_0_OFFS 0x1280
+#define QIB_7322_RxCreditVL0_0_DEF 0x0000000000000000
+#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_LSB 0x10
+#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_MSB 0x1B
+#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_RMASK 0xFFF
+#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_LSB 0x0
+#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_MSB 0xB
+#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_RMASK 0xFFF
+
+#define QIB_7322_SendDmaBufUsed0_0_OFFS 0x1480
+#define QIB_7322_SendDmaBufUsed0_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_LSB 0x0
+#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_MSB 0x3F
+#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_RMASK 0x0
+
+#define QIB_7322_SendCheckControl_0_OFFS 0x14A8
+#define QIB_7322_SendCheckControl_0_DEF 0x0000000000000000
+#define QIB_7322_SendCheckControl_0_PKey_En_LSB 0x4
+#define QIB_7322_SendCheckControl_0_PKey_En_MSB 0x4
+#define QIB_7322_SendCheckControl_0_PKey_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_BTHQP_En_LSB 0x3
+#define QIB_7322_SendCheckControl_0_BTHQP_En_MSB 0x3
+#define QIB_7322_SendCheckControl_0_BTHQP_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_SLID_En_LSB 0x2
+#define QIB_7322_SendCheckControl_0_SLID_En_MSB 0x2
+#define QIB_7322_SendCheckControl_0_SLID_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_RawIPV6_En_LSB 0x1
+#define QIB_7322_SendCheckControl_0_RawIPV6_En_MSB 0x1
+#define QIB_7322_SendCheckControl_0_RawIPV6_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_LSB 0x0
+#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_MSB 0x0
+#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_RMASK 0x1
+
+#define QIB_7322_SendIBSLIDMask_0_OFFS 0x14B0
+#define QIB_7322_SendIBSLIDMask_0_DEF 0x0000000000000000
+#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_LSB 0x0
+#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_MSB 0xF
+#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK 0xFFFF
+
+#define QIB_7322_SendIBSLIDAssign_0_OFFS 0x14B8
+#define QIB_7322_SendIBSLIDAssign_0_DEF 0x0000000000000000
+#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_LSB 0x0
+#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_MSB 0xF
+#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK 0xFFFF
+
+#define QIB_7322_IBCStatusA_0_OFFS 0x1540
+#define QIB_7322_IBCStatusA_0_DEF 0x0000000000000X02
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_LSB 0x27
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_MSB 0x27
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_LSB 0x26
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_MSB 0x26
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_LSB 0x25
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_MSB 0x25
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_LSB 0x24
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_MSB 0x24
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_LSB 0x23
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_MSB 0x23
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_LSB 0x22
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_MSB 0x22
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_LSB 0x21
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_MSB 0x21
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_LSB 0x20
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_MSB 0x20
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxReady_LSB 0x1E
+#define QIB_7322_IBCStatusA_0_TxReady_MSB 0x1E
+#define QIB_7322_IBCStatusA_0_TxReady_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_LSB 0x1D
+#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_MSB 0x1D
+#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_LSB 0xF
+#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_MSB 0xF
+#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_ScrambleEn_LSB 0xE
+#define QIB_7322_IBCStatusA_0_ScrambleEn_MSB 0xE
+#define QIB_7322_IBCStatusA_0_ScrambleEn_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_LSB 0xD
+#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_MSB 0xD
+#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_LSB 0xC
+#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_MSB 0xC
+#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_LSB 0xA
+#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_MSB 0xA
+#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkWidthActive_LSB 0x9
+#define QIB_7322_IBCStatusA_0_LinkWidthActive_MSB 0x9
+#define QIB_7322_IBCStatusA_0_LinkWidthActive_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkSpeedActive_LSB 0x8
+#define QIB_7322_IBCStatusA_0_LinkSpeedActive_MSB 0x8
+#define QIB_7322_IBCStatusA_0_LinkSpeedActive_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkState_LSB 0x5
+#define QIB_7322_IBCStatusA_0_LinkState_MSB 0x7
+#define QIB_7322_IBCStatusA_0_LinkState_RMASK 0x7
+#define QIB_7322_IBCStatusA_0_LinkTrainingState_LSB 0x0
+#define QIB_7322_IBCStatusA_0_LinkTrainingState_MSB 0x4
+#define QIB_7322_IBCStatusA_0_LinkTrainingState_RMASK 0x1F
+
+#define QIB_7322_IBCStatusB_0_OFFS 0x1548
+#define QIB_7322_IBCStatusB_0_DEF 0x00000000XXXXXXXX
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_LSB 0x27
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_MSB 0x27
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_LSB 0x26
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_MSB 0x26
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_LSB 0x25
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_MSB 0x25
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_LSB 0x24
+#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_MSB 0x24
+#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_LSB 0x20
+#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_MSB 0x23
+#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_RMASK 0xF
+#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_LSB 0x1E
+#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_MSB 0x1F
+#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_RMASK 0x3
+#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_LSB 0x1A
+#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_MSB 0x1D
+#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_RMASK 0xF
+#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_LSB 0x0
+#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_MSB 0x19
+#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_RMASK 0x3FFFFFF
+
+#define QIB_7322_IBCCtrlA_0_OFFS 0x1560
+#define QIB_7322_IBCCtrlA_0_DEF 0x0000000000000000
+#define QIB_7322_IBCCtrlA_0_Loopback_LSB 0x3F
+#define QIB_7322_IBCCtrlA_0_Loopback_MSB 0x3F
+#define QIB_7322_IBCCtrlA_0_Loopback_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_LSB 0x3E
+#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_MSB 0x3E
+#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_IBLinkEn_LSB 0x3D
+#define QIB_7322_IBCCtrlA_0_IBLinkEn_MSB 0x3D
+#define QIB_7322_IBCCtrlA_0_IBLinkEn_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_LSB 0x3C
+#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_MSB 0x3C
+#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_NumVLane_LSB 0x30
+#define QIB_7322_IBCCtrlA_0_NumVLane_MSB 0x32
+#define QIB_7322_IBCCtrlA_0_NumVLane_RMASK 0x7
+#define QIB_7322_IBCCtrlA_0_OverrunThreshold_LSB 0x24
+#define QIB_7322_IBCCtrlA_0_OverrunThreshold_MSB 0x27
+#define QIB_7322_IBCCtrlA_0_OverrunThreshold_RMASK 0xF
+#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_LSB 0x20
+#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_MSB 0x23
+#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_RMASK 0xF
+#define QIB_7322_IBCCtrlA_0_MaxPktLen_LSB 0x15
+#define QIB_7322_IBCCtrlA_0_MaxPktLen_MSB 0x1F
+#define QIB_7322_IBCCtrlA_0_MaxPktLen_RMASK 0x7FF
+#define QIB_7322_IBCCtrlA_0_LinkCmd_LSB 0x13
+#define QIB_7322_IBCCtrlA_0_LinkCmd_MSB 0x14
+#define QIB_7322_IBCCtrlA_0_LinkCmd_RMASK 0x3
+#define QIB_7322_IBCCtrlA_0_LinkInitCmd_LSB 0x10
+#define QIB_7322_IBCCtrlA_0_LinkInitCmd_MSB 0x12
+#define QIB_7322_IBCCtrlA_0_LinkInitCmd_RMASK 0x7
+#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_LSB 0x8
+#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_MSB 0xF
+#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_RMASK 0xFF
+#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_LSB 0x0
+#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_MSB 0x7
+#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_RMASK 0xFF
+
+#define QIB_7322_IBCCtrlB_0_OFFS 0x1568
+#define QIB_7322_IBCCtrlB_0_DEF 0x00000000000305FF
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_LSB 0x30
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_MSB 0x3F
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK 0xFFFF
+#define QIB_7322_IBCCtrlB_0_IB_DLID_LSB 0x20
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MSB 0x2F
+#define QIB_7322_IBCCtrlB_0_IB_DLID_RMASK 0xFFFF
+#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_LSB 0x1B
+#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_MSB 0x1B
+#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_LSB 0x1A
+#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_MSB 0x1A
+#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_LSB 0x12
+#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_MSB 0x19
+#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_RMASK 0xFF
+#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_LSB 0x11
+#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_MSB 0x11
+#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_LSB 0x10
+#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_MSB 0x10
+#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_DDS_LSB 0xC
+#define QIB_7322_IBCCtrlB_0_SD_DDS_MSB 0xF
+#define QIB_7322_IBCCtrlB_0_SD_DDS_RMASK 0xF
+#define QIB_7322_IBCCtrlB_0_SD_DDSV_LSB 0xB
+#define QIB_7322_IBCCtrlB_0_SD_DDSV_MSB 0xB
+#define QIB_7322_IBCCtrlB_0_SD_DDSV_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_LSB 0xA
+#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_MSB 0xA
+#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_LSB 0x9
+#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_MSB 0x9
+#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_LSB 0x8
+#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_MSB 0x8
+#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_LSB 0x7
+#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_MSB 0x7
+#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_LSB 0x5
+#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_MSB 0x6
+#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_RMASK 0x3
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_LSB 0x4
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_MSB 0x4
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_LSB 0x3
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_MSB 0x3
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_LSB 0x2
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_MSB 0x2
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_LSB 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_MSB 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_LSB 0x0
+#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_MSB 0x0
+#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_RMASK 0x1
+
+#define QIB_7322_IBCCtrlC_0_OFFS 0x1570
+#define QIB_7322_IBCCtrlC_0_DEF 0x0000000000000301
+#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_LSB 0x5
+#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_MSB 0x9
+#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_RMASK 0x1F
+#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_LSB 0x0
+#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_MSB 0x4
+#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_RMASK 0x1F
+
+#define QIB_7322_HRTBT_GUID_0_OFFS 0x1588
+#define QIB_7322_HRTBT_GUID_0_DEF 0x0000000000000000
+
+#define QIB_7322_IB_SDTEST_IF_TX_0_OFFS 0x1590
+#define QIB_7322_IB_SDTEST_IF_TX_0_DEF 0x0000000000000000
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_LSB 0x30
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_MSB 0x3F
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_LSB 0x20
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_MSB 0x2F
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_LSB 0xD
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_MSB 0xF
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_RMASK 0x7
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_LSB 0xB
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_MSB 0xC
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_RMASK 0x3
+#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_LSB 0x4
+#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_MSB 0x4
+#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_RMASK 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_LSB 0x2
+#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_MSB 0x3
+#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_RMASK 0x3
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_LSB 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_MSB 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_RMASK 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_LSB 0x0
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_MSB 0x0
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_RMASK 0x1
+
+#define QIB_7322_IB_SDTEST_IF_RX_0_OFFS 0x1598
+#define QIB_7322_IB_SDTEST_IF_RX_0_DEF 0x0000000000000000
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_LSB 0x30
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_MSB 0x3F
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_LSB 0x20
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_MSB 0x2F
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_LSB 0x18
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_MSB 0x1F
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_RMASK 0xFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_LSB 0x10
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_MSB 0x17
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_RMASK 0xFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_LSB 0x1
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_MSB 0x1
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_RMASK 0x1
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_LSB 0x0
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_MSB 0x0
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_RMASK 0x1
+
+#define QIB_7322_IBNCModeCtrl_0_OFFS 0x15B8
+#define QIB_7322_IBNCModeCtrl_0_DEF 0x0000000000000000
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_LSB 0x22
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_MSB 0x22
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_LSB 0x21
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_MSB 0x21
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_LSB 0x20
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_MSB 0x20
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_LSB 0x11
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_MSB 0x19
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_RMASK 0x1FF
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_LSB 0x8
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_MSB 0x10
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_RMASK 0x1FF
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_LSB 0x2
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_MSB 0x2
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_LSB 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_MSB 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_LSB 0x0
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_MSB 0x0
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_RMASK 0x1
+
+#define QIB_7322_IBSerdesStatus_0_OFFS 0x15D0
+#define QIB_7322_IBSerdesStatus_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBPCSConfig_0_OFFS 0x15D8
+#define QIB_7322_IBPCSConfig_0_DEF 0x0000000000000007
+#define QIB_7322_IBPCSConfig_0_link_sync_mask_LSB 0x9
+#define QIB_7322_IBPCSConfig_0_link_sync_mask_MSB 0x12
+#define QIB_7322_IBPCSConfig_0_link_sync_mask_RMASK 0x3FF
+#define QIB_7322_IBPCSConfig_0_xcv_rreset_LSB 0x2
+#define QIB_7322_IBPCSConfig_0_xcv_rreset_MSB 0x2
+#define QIB_7322_IBPCSConfig_0_xcv_rreset_RMASK 0x1
+#define QIB_7322_IBPCSConfig_0_xcv_treset_LSB 0x1
+#define QIB_7322_IBPCSConfig_0_xcv_treset_MSB 0x1
+#define QIB_7322_IBPCSConfig_0_xcv_treset_RMASK 0x1
+#define QIB_7322_IBPCSConfig_0_tx_rx_reset_LSB 0x0
+#define QIB_7322_IBPCSConfig_0_tx_rx_reset_MSB 0x0
+#define QIB_7322_IBPCSConfig_0_tx_rx_reset_RMASK 0x1
+
+#define QIB_7322_IBSerdesCtrl_0_OFFS 0x15E0
+#define QIB_7322_IBSerdesCtrl_0_DEF 0x0000000000FFA00F
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_LSB 0x1A
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_MSB 0x1A
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_LSB 0x19
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_MSB 0x19
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_LSB 0x18
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_MSB 0x18
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_LSB 0x14
+#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_MSB 0x17
+#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_RMASK 0xF
+#define QIB_7322_IBSerdesCtrl_0_CGMODE_LSB 0x10
+#define QIB_7322_IBSerdesCtrl_0_CGMODE_MSB 0x13
+#define QIB_7322_IBSerdesCtrl_0_CGMODE_RMASK 0xF
+#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_LSB 0xF
+#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_MSB 0xF
+#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_LSB 0xD
+#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_MSB 0xD
+#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_LPEN_LSB 0xC
+#define QIB_7322_IBSerdesCtrl_0_LPEN_MSB 0xC
+#define QIB_7322_IBSerdesCtrl_0_LPEN_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_PLLPD_LSB 0xB
+#define QIB_7322_IBSerdesCtrl_0_PLLPD_MSB 0xB
+#define QIB_7322_IBSerdesCtrl_0_PLLPD_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_TXPD_LSB 0xA
+#define QIB_7322_IBSerdesCtrl_0_TXPD_MSB 0xA
+#define QIB_7322_IBSerdesCtrl_0_TXPD_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_RXPD_LSB 0x9
+#define QIB_7322_IBSerdesCtrl_0_RXPD_MSB 0x9
+#define QIB_7322_IBSerdesCtrl_0_RXPD_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_TXIDLE_LSB 0x8
+#define QIB_7322_IBSerdesCtrl_0_TXIDLE_MSB 0x8
+#define QIB_7322_IBSerdesCtrl_0_TXIDLE_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_CMODE_LSB 0x0
+#define QIB_7322_IBSerdesCtrl_0_CMODE_MSB 0x6
+#define QIB_7322_IBSerdesCtrl_0_CMODE_RMASK 0x7F
+
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_OFFS 0x1600
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_DEF 0x0000000000000000
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_LSB 0x1F
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_MSB 0x1F
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_RMASK 0x1
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_LSB 0x1E
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_MSB 0x1E
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_RMASK 0x1
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_LSB 0xE
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_MSB 0x11
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_RMASK 0xF
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_LSB 0x9
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_MSB 0xD
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_RMASK 0x1F
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_LSB 0x5
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_MSB 0x8
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_RMASK 0xF
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_LSB 0x3
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_MSB 0x4
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_RMASK 0x3
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_LSB 0x0
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_MSB 0x2
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_RMASK 0x7
+
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_OFFS 0x1640
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_OFFS 0x1648
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_OFFS 0x1650
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_OFFS 0x1658
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_OFFS 0x1660
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_OFFS 0x1668
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_OFFS 0x1670
+#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_DEF 0x0000000000000000
+
+#define QIB_7322_HighPriorityLimit_0_OFFS 0x1BC0
+#define QIB_7322_HighPriorityLimit_0_DEF 0x0000000000000000
+#define QIB_7322_HighPriorityLimit_0_Limit_LSB 0x0
+#define QIB_7322_HighPriorityLimit_0_Limit_MSB 0x7
+#define QIB_7322_HighPriorityLimit_0_Limit_RMASK 0xFF
+
+#define QIB_7322_LowPriority0_0_OFFS 0x1C00
+#define QIB_7322_LowPriority0_0_DEF 0x0000000000000000
+#define QIB_7322_LowPriority0_0_VirtualLane_LSB 0x10
+#define QIB_7322_LowPriority0_0_VirtualLane_MSB 0x12
+#define QIB_7322_LowPriority0_0_VirtualLane_RMASK 0x7
+#define QIB_7322_LowPriority0_0_Weight_LSB 0x0
+#define QIB_7322_LowPriority0_0_Weight_MSB 0x7
+#define QIB_7322_LowPriority0_0_Weight_RMASK 0xFF
+
+#define QIB_7322_HighPriority0_0_OFFS 0x1E00
+#define QIB_7322_HighPriority0_0_DEF 0x0000000000000000
+#define QIB_7322_HighPriority0_0_VirtualLane_LSB 0x10
+#define QIB_7322_HighPriority0_0_VirtualLane_MSB 0x12
+#define QIB_7322_HighPriority0_0_VirtualLane_RMASK 0x7
+#define QIB_7322_HighPriority0_0_Weight_LSB 0x0
+#define QIB_7322_HighPriority0_0_Weight_MSB 0x7
+#define QIB_7322_HighPriority0_0_Weight_RMASK 0xFF
+
+#define QIB_7322_CntrRegBase_1_OFFS 0x2028
+#define QIB_7322_CntrRegBase_1_DEF 0x0000000000013000
+
+#define QIB_7322_RcvQPMulticastContext_1_OFFS 0x2170
+
+#define QIB_7322_SendCtrl_1_OFFS 0x21C0
+
+#define QIB_7322_SendBufAvail0_OFFS 0x3000
+#define QIB_7322_SendBufAvail0_DEF 0x0000000000000000
+#define QIB_7322_SendBufAvail0_SendBuf_31_0_LSB 0x0
+#define QIB_7322_SendBufAvail0_SendBuf_31_0_MSB 0x3F
+#define QIB_7322_SendBufAvail0_SendBuf_31_0_RMASK 0x0
+
+#define QIB_7322_MsixTable_OFFS 0x8000
+#define QIB_7322_MsixTable_DEF 0x0000000000000000
+
+#define QIB_7322_MsixPba_OFFS 0x9000
+#define QIB_7322_MsixPba_DEF 0x0000000000000000
+
+#define QIB_7322_LAMemory_OFFS 0xA000
+#define QIB_7322_LAMemory_DEF 0x0000000000000000
+
+#define QIB_7322_LBIntCnt_OFFS 0x11000
+#define QIB_7322_LBIntCnt_DEF 0x0000000000000000
+
+#define QIB_7322_LBFlowStallCnt_OFFS 0x11008
+#define QIB_7322_LBFlowStallCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RxTIDFullErrCnt_OFFS 0x110D0
+#define QIB_7322_RxTIDFullErrCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RxTIDValidErrCnt_OFFS 0x110D8
+#define QIB_7322_RxTIDValidErrCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RxP0HdrEgrOvflCnt_OFFS 0x110E8
+#define QIB_7322_RxP0HdrEgrOvflCnt_DEF 0x0000000000000000
+
+#define QIB_7322_PcieRetryBufDiagQwordCnt_OFFS 0x111A0
+#define QIB_7322_PcieRetryBufDiagQwordCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RxTidFlowDropCnt_OFFS 0x111E0
+#define QIB_7322_RxTidFlowDropCnt_DEF 0x0000000000000000
+
+#define QIB_7322_LBIntCnt_0_OFFS 0x12000
+#define QIB_7322_LBIntCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxCreditUpToDateTimeOut_0_OFFS 0x12008
+#define QIB_7322_TxCreditUpToDateTimeOut_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxSDmaDescCnt_0_OFFS 0x12010
+#define QIB_7322_TxSDmaDescCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxUnsupVLErrCnt_0_OFFS 0x12018
+#define QIB_7322_TxUnsupVLErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxDataPktCnt_0_OFFS 0x12020
+#define QIB_7322_TxDataPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxFlowPktCnt_0_OFFS 0x12028
+#define QIB_7322_TxFlowPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxDwordCnt_0_OFFS 0x12030
+#define QIB_7322_TxDwordCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxLenErrCnt_0_OFFS 0x12038
+#define QIB_7322_TxLenErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxMaxMinLenErrCnt_0_OFFS 0x12040
+#define QIB_7322_TxMaxMinLenErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxUnderrunCnt_0_OFFS 0x12048
+#define QIB_7322_TxUnderrunCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxFlowStallCnt_0_OFFS 0x12050
+#define QIB_7322_TxFlowStallCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxDroppedPktCnt_0_OFFS 0x12058
+#define QIB_7322_TxDroppedPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxDroppedPktCnt_0_OFFS 0x12060
+#define QIB_7322_RxDroppedPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxDataPktCnt_0_OFFS 0x12068
+#define QIB_7322_RxDataPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxFlowPktCnt_0_OFFS 0x12070
+#define QIB_7322_RxFlowPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxDwordCnt_0_OFFS 0x12078
+#define QIB_7322_RxDwordCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxLenErrCnt_0_OFFS 0x12080
+#define QIB_7322_RxLenErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxMaxMinLenErrCnt_0_OFFS 0x12088
+#define QIB_7322_RxMaxMinLenErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxICRCErrCnt_0_OFFS 0x12090
+#define QIB_7322_RxICRCErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxVCRCErrCnt_0_OFFS 0x12098
+#define QIB_7322_RxVCRCErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxFlowCtrlViolCnt_0_OFFS 0x120A0
+#define QIB_7322_RxFlowCtrlViolCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxVersionErrCnt_0_OFFS 0x120A8
+#define QIB_7322_RxVersionErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxLinkMalformCnt_0_OFFS 0x120B0
+#define QIB_7322_RxLinkMalformCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxEBPCnt_0_OFFS 0x120B8
+#define QIB_7322_RxEBPCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxLPCRCErrCnt_0_OFFS 0x120C0
+#define QIB_7322_RxLPCRCErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxBufOvflCnt_0_OFFS 0x120C8
+#define QIB_7322_RxBufOvflCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxLenTruncateCnt_0_OFFS 0x120D0
+#define QIB_7322_RxLenTruncateCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxPKeyMismatchCnt_0_OFFS 0x120E0
+#define QIB_7322_RxPKeyMismatchCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBLinkDownedCnt_0_OFFS 0x12180
+#define QIB_7322_IBLinkDownedCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBSymbolErrCnt_0_OFFS 0x12188
+#define QIB_7322_IBSymbolErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBStatusChangeCnt_0_OFFS 0x12190
+#define QIB_7322_IBStatusChangeCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBLinkErrRecoveryCnt_0_OFFS 0x12198
+#define QIB_7322_IBLinkErrRecoveryCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_ExcessBufferOvflCnt_0_OFFS 0x121A8
+#define QIB_7322_ExcessBufferOvflCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_LocalLinkIntegrityErrCnt_0_OFFS 0x121B0
+#define QIB_7322_LocalLinkIntegrityErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxVlErrCnt_0_OFFS 0x121B8
+#define QIB_7322_RxVlErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxDlidFltrCnt_0_OFFS 0x121C0
+#define QIB_7322_RxDlidFltrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxVL15DroppedPktCnt_0_OFFS 0x121C8
+#define QIB_7322_RxVL15DroppedPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxOtherLocalPhyErrCnt_0_OFFS 0x121D0
+#define QIB_7322_RxOtherLocalPhyErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxQPInvalidContextCnt_0_OFFS 0x121D8
+#define QIB_7322_RxQPInvalidContextCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxHeadersErrCnt_0_OFFS 0x121F8
+#define QIB_7322_TxHeadersErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSRcvDataCount_0_OFFS 0x12218
+#define QIB_7322_PSRcvDataCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSRcvPktsCount_0_OFFS 0x12220
+#define QIB_7322_PSRcvPktsCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitDataCount_0_OFFS 0x12228
+#define QIB_7322_PSXmitDataCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitPktsCount_0_OFFS 0x12230
+#define QIB_7322_PSXmitPktsCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitWaitCount_0_OFFS 0x12238
+#define QIB_7322_PSXmitWaitCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_LBIntCnt_1_OFFS 0x13000
+#define QIB_7322_LBIntCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxCreditUpToDateTimeOut_1_OFFS 0x13008
+#define QIB_7322_TxCreditUpToDateTimeOut_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxSDmaDescCnt_1_OFFS 0x13010
+#define QIB_7322_TxSDmaDescCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxUnsupVLErrCnt_1_OFFS 0x13018
+#define QIB_7322_TxUnsupVLErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxDataPktCnt_1_OFFS 0x13020
+#define QIB_7322_TxDataPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxFlowPktCnt_1_OFFS 0x13028
+#define QIB_7322_TxFlowPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxDwordCnt_1_OFFS 0x13030
+#define QIB_7322_TxDwordCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxLenErrCnt_1_OFFS 0x13038
+#define QIB_7322_TxLenErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxMaxMinLenErrCnt_1_OFFS 0x13040
+#define QIB_7322_TxMaxMinLenErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxUnderrunCnt_1_OFFS 0x13048
+#define QIB_7322_TxUnderrunCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxFlowStallCnt_1_OFFS 0x13050
+#define QIB_7322_TxFlowStallCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxDroppedPktCnt_1_OFFS 0x13058
+#define QIB_7322_TxDroppedPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxDroppedPktCnt_1_OFFS 0x13060
+#define QIB_7322_RxDroppedPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxDataPktCnt_1_OFFS 0x13068
+#define QIB_7322_RxDataPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxFlowPktCnt_1_OFFS 0x13070
+#define QIB_7322_RxFlowPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxDwordCnt_1_OFFS 0x13078
+#define QIB_7322_RxDwordCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxLenErrCnt_1_OFFS 0x13080
+#define QIB_7322_RxLenErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxMaxMinLenErrCnt_1_OFFS 0x13088
+#define QIB_7322_RxMaxMinLenErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxICRCErrCnt_1_OFFS 0x13090
+#define QIB_7322_RxICRCErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxVCRCErrCnt_1_OFFS 0x13098
+#define QIB_7322_RxVCRCErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxFlowCtrlViolCnt_1_OFFS 0x130A0
+#define QIB_7322_RxFlowCtrlViolCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxVersionErrCnt_1_OFFS 0x130A8
+#define QIB_7322_RxVersionErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxLinkMalformCnt_1_OFFS 0x130B0
+#define QIB_7322_RxLinkMalformCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxEBPCnt_1_OFFS 0x130B8
+#define QIB_7322_RxEBPCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxLPCRCErrCnt_1_OFFS 0x130C0
+#define QIB_7322_RxLPCRCErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxBufOvflCnt_1_OFFS 0x130C8
+#define QIB_7322_RxBufOvflCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxLenTruncateCnt_1_OFFS 0x130D0
+#define QIB_7322_RxLenTruncateCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxPKeyMismatchCnt_1_OFFS 0x130E0
+#define QIB_7322_RxPKeyMismatchCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_IBLinkDownedCnt_1_OFFS 0x13180
+#define QIB_7322_IBLinkDownedCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_IBSymbolErrCnt_1_OFFS 0x13188
+#define QIB_7322_IBSymbolErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_IBStatusChangeCnt_1_OFFS 0x13190
+#define QIB_7322_IBStatusChangeCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_IBLinkErrRecoveryCnt_1_OFFS 0x13198
+#define QIB_7322_IBLinkErrRecoveryCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_ExcessBufferOvflCnt_1_OFFS 0x131A8
+#define QIB_7322_ExcessBufferOvflCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_LocalLinkIntegrityErrCnt_1_OFFS 0x131B0
+#define QIB_7322_LocalLinkIntegrityErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxVlErrCnt_1_OFFS 0x131B8
+#define QIB_7322_RxVlErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxDlidFltrCnt_1_OFFS 0x131C0
+#define QIB_7322_RxDlidFltrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxVL15DroppedPktCnt_1_OFFS 0x131C8
+#define QIB_7322_RxVL15DroppedPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxOtherLocalPhyErrCnt_1_OFFS 0x131D0
+#define QIB_7322_RxOtherLocalPhyErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxQPInvalidContextCnt_1_OFFS 0x131D8
+#define QIB_7322_RxQPInvalidContextCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxHeadersErrCnt_1_OFFS 0x131F8
+#define QIB_7322_TxHeadersErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSRcvDataCount_1_OFFS 0x13218
+#define QIB_7322_PSRcvDataCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSRcvPktsCount_1_OFFS 0x13220
+#define QIB_7322_PSRcvPktsCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitDataCount_1_OFFS 0x13228
+#define QIB_7322_PSXmitDataCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitPktsCount_1_OFFS 0x13230
+#define QIB_7322_PSXmitPktsCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitWaitCount_1_OFFS 0x13238
+#define QIB_7322_PSXmitWaitCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_RcvEgrArray_OFFS 0x14000
+#define QIB_7322_RcvEgrArray_DEF 0x0000000000000000
+#define QIB_7322_RcvEgrArray_RT_BufSize_LSB 0x25
+#define QIB_7322_RcvEgrArray_RT_BufSize_MSB 0x27
+#define QIB_7322_RcvEgrArray_RT_BufSize_RMASK 0x7
+#define QIB_7322_RcvEgrArray_RT_Addr_LSB 0x0
+#define QIB_7322_RcvEgrArray_RT_Addr_MSB 0x24
+#define QIB_7322_RcvEgrArray_RT_Addr_RMASK 0x1FFFFFFFFF
+
+#define QIB_7322_RcvTIDArray0_OFFS 0x50000
+#define QIB_7322_RcvTIDArray0_DEF 0x0000000000000000
+#define QIB_7322_RcvTIDArray0_RT_BufSize_LSB 0x25
+#define QIB_7322_RcvTIDArray0_RT_BufSize_MSB 0x27
+#define QIB_7322_RcvTIDArray0_RT_BufSize_RMASK 0x7
+#define QIB_7322_RcvTIDArray0_RT_Addr_LSB 0x0
+#define QIB_7322_RcvTIDArray0_RT_Addr_MSB 0x24
+#define QIB_7322_RcvTIDArray0_RT_Addr_RMASK 0x1FFFFFFFFF
+
+#define QIB_7322_IBSD_DDS_MAP_TABLE_0_OFFS 0xD0000
+#define QIB_7322_IBSD_DDS_MAP_TABLE_0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvHdrTail0_OFFS 0x200000
+#define QIB_7322_RcvHdrTail0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvHdrHead0_OFFS 0x200008
+#define QIB_7322_RcvHdrHead0_DEF 0x0000000000000000
+#define QIB_7322_RcvHdrHead0_counter_LSB 0x20
+#define QIB_7322_RcvHdrHead0_counter_MSB 0x2F
+#define QIB_7322_RcvHdrHead0_counter_RMASK 0xFFFF
+#define QIB_7322_RcvHdrHead0_RcvHeadPointer_LSB 0x0
+#define QIB_7322_RcvHdrHead0_RcvHeadPointer_MSB 0x1F
+#define QIB_7322_RcvHdrHead0_RcvHeadPointer_RMASK 0xFFFFFFFF
+
+#define QIB_7322_RcvEgrIndexTail0_OFFS 0x200010
+#define QIB_7322_RcvEgrIndexTail0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvEgrIndexHead0_OFFS 0x200018
+#define QIB_7322_RcvEgrIndexHead0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvTIDFlowTable0_OFFS 0x201000
+#define QIB_7322_RcvTIDFlowTable0_DEF 0x0000000000000000
+#define QIB_7322_RcvTIDFlowTable0_GenMismatch_LSB 0x1C
+#define QIB_7322_RcvTIDFlowTable0_GenMismatch_MSB 0x1C
+#define QIB_7322_RcvTIDFlowTable0_GenMismatch_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_LSB 0x1B
+#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_MSB 0x1B
+#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_LSB 0x16
+#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_MSB 0x16
+#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_LSB 0x15
+#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_MSB 0x15
+#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_LSB 0x14
+#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_MSB 0x14
+#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_FlowValid_LSB 0x13
+#define QIB_7322_RcvTIDFlowTable0_FlowValid_MSB 0x13
+#define QIB_7322_RcvTIDFlowTable0_FlowValid_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_GenVal_LSB 0xB
+#define QIB_7322_RcvTIDFlowTable0_GenVal_MSB 0x12
+#define QIB_7322_RcvTIDFlowTable0_GenVal_RMASK 0xFF
+#define QIB_7322_RcvTIDFlowTable0_SeqNum_LSB 0x0
+#define QIB_7322_RcvTIDFlowTable0_SeqNum_MSB 0xA
+#define QIB_7322_RcvTIDFlowTable0_SeqNum_RMASK 0x7FF
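For orientation while reading the *_LSB/_MSB/_RMASK triplets above: each triplet locates one field inside a 64-bit register, and generated tables like this are normally consumed by shift-and-mask helpers along the lines of the sketch below. This is an illustration only, not part of the patch; the helper names are invented here, and the driver wraps the same pattern in its own macros.

#include <stdint.h>

/* Hypothetical helpers showing how an _LSB/_RMASK pair selects a field. */
static inline uint64_t qib_field_get(uint64_t regval, unsigned int lsb,
                                     uint64_t rmask)
{
        return (regval >> lsb) & rmask;         /* isolate the field */
}

static inline uint64_t qib_field_put(uint64_t regval, unsigned int lsb,
                                     uint64_t rmask, uint64_t val)
{
        regval &= ~(rmask << lsb);              /* clear the old field */
        return regval | ((val & rmask) << lsb); /* merge in the new value */
}

/*
 * Example: pull LinkCmd out of a previously read IBCCtrlA_0 value.
 *
 *   cmd = qib_field_get(ibcctrl_a, QIB_7322_IBCCtrlA_0_LinkCmd_LSB,
 *                       QIB_7322_IBCCtrlA_0_LinkCmd_RMASK);
 */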
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
new file mode 100644
index 000000000000..b3955ed8f794
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -0,0 +1,758 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _QIB_COMMON_H
+#define _QIB_COMMON_H
+
+/*
+ * This file contains defines, structures, etc. that are used
+ * to communicate between kernel and user code.
+ */
+
+/* This is the IEEE-assigned OUI for QLogic Inc. QLogic_IB */
+#define QIB_SRC_OUI_1 0x00
+#define QIB_SRC_OUI_2 0x11
+#define QIB_SRC_OUI_3 0x75
+
+/* version of protocol header (known to chip also). In the long run,
+ * we should be able to generate and accept a range of version numbers;
+ * for now we only accept one, and it's compiled in.
+ */
+#define IPS_PROTO_VERSION 2
+
+/*
+ * These are compile time constants that you may want to enable or disable
+ * if you are trying to debug problems with code or performance.
+ * QIB_VERBOSE_TRACING define as 1 if you want additional tracing in
+ * fastpath code
+ * QIB_TRACE_REGWRITES define as 1 if you want register writes to be
+ * traced in fastpath code
+ * _QIB_TRACING define as 0 if you want to remove all tracing in a
+ * compilation unit
+ */
+
+/*
+ * The value in the BTH QP field that QLogic_IB uses to differentiate
+ * a qlogic_ib protocol IB packet vs. standard IB transport.
+ * It needs to be even (0x656b78), because the LSB is sometimes
+ * used for the MSB of context. The change may cause a problem
+ * interoperating with older software.
+ */
+#define QIB_KD_QP 0x656b78
+
+/*
+ * These are the status bits readable (in ascii form, 64bit value)
+ * from the "status" sysfs file. For binary compatibility, values
+ * must remain as is; removed states can be reused for different
+ * purposes.
+ */
+#define QIB_STATUS_INITTED 0x1 /* basic initialization done */
+/* Chip has been found and initted */
+#define QIB_STATUS_CHIP_PRESENT 0x20
+/* IB link is at ACTIVE, usable for data traffic */
+#define QIB_STATUS_IB_READY 0x40
+/* link is configured, LID, MTU, etc. have been set */
+#define QIB_STATUS_IB_CONF 0x80
+/* A Fatal hardware error has occurred. */
+#define QIB_STATUS_HWERROR 0x200
+
+/*
+ * The list of usermode accessible registers. Also see Reg_* later in file.
+ */
+enum qib_ureg {
+ /* (RO) DMA RcvHdr to be used next. */
+ ur_rcvhdrtail = 0,
+ /* (RW) RcvHdr entry to be processed next by host. */
+ ur_rcvhdrhead = 1,
+ /* (RO) Index of next Eager index to use. */
+ ur_rcvegrindextail = 2,
+ /* (RW) Eager TID to be processed next */
+ ur_rcvegrindexhead = 3,
+ /* For internal use only; max register number. */
+ _QIB_UregMax
+};
+
+/* bit values for spi_runtime_flags */
+#define QIB_RUNTIME_PCIE 0x0002
+#define QIB_RUNTIME_FORCE_WC_ORDER 0x0004
+#define QIB_RUNTIME_RCVHDR_COPY 0x0008
+#define QIB_RUNTIME_MASTER 0x0010
+#define QIB_RUNTIME_RCHK 0x0020
+#define QIB_RUNTIME_NODMA_RTAIL 0x0080
+#define QIB_RUNTIME_SPECIAL_TRIGGER 0x0100
+#define QIB_RUNTIME_SDMA 0x0200
+#define QIB_RUNTIME_FORCE_PIOAVAIL 0x0400
+#define QIB_RUNTIME_PIO_REGSWAPPED 0x0800
+#define QIB_RUNTIME_CTXT_MSB_IN_QP 0x1000
+#define QIB_RUNTIME_CTXT_REDIRECT 0x2000
+#define QIB_RUNTIME_HDRSUPP 0x4000
+
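As a hedged usage note (not part of the patch): these flags come back to user code in the spi_runtime_flags bitmap of qib_base_info, described below, where they are tested with simple bit masks. The function name and variable below are invented for the example.

/* Illustrative only: 'binfo' is an already-initialized qib_base_info. */
static inline int qib_ctxt_has_sdma(const struct qib_base_info *binfo)
{
        return (binfo->spi_runtime_flags & QIB_RUNTIME_SDMA) != 0;
}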
+/*
+ * This structure is returned by qib_userinit() immediately after
+ * open to get implementation-specific info, and info specific to this
+ * instance.
+ *
+ * This struct must have explicit pad fields where type sizes
+ * may result in different alignments between 32 and 64 bit
+ * programs, since the 64 bit kernel requires the user code
+ * to have matching offsets.
+ */
+struct qib_base_info {
+ /* version of hardware, for feature checking. */
+ __u32 spi_hw_version;
+ /* version of software, for feature checking. */
+ __u32 spi_sw_version;
+ /* QLogic_IB context assigned, goes into sent packets */
+ __u16 spi_ctxt;
+ __u16 spi_subctxt;
+ /*
+	 * IB MTU; a packet's IB data must be less than this.
+ * The MTU is in bytes, and will be a multiple of 4 bytes.
+ */
+ __u32 spi_mtu;
+ /*
+ * Size of a PIO buffer. Any given packet's total size must be less
+ * than this (in words). Included is the starting control word, so
+ * if 513 is returned, then total pkt size is 512 words or less.
+ */
+ __u32 spi_piosize;
+ /* size of the TID cache in qlogic_ib, in entries */
+ __u32 spi_tidcnt;
+ /* size of the TID Eager list in qlogic_ib, in entries */
+ __u32 spi_tidegrcnt;
+ /* size of a single receive header queue entry in words. */
+ __u32 spi_rcvhdrent_size;
+ /*
+ * Count of receive header queue entries allocated.
+	 * This may be less than the spu_rcvhdrcnt passed in!
+ */
+ __u32 spi_rcvhdr_cnt;
+
+ /* per-chip and other runtime features bitmap (QIB_RUNTIME_*) */
+ __u32 spi_runtime_flags;
+
+ /* address where hardware receive header queue is mapped */
+ __u64 spi_rcvhdr_base;
+
+ /* user program. */
+
+ /* base address of eager TID receive buffers used by hardware. */
+ __u64 spi_rcv_egrbufs;
+
+ /* Allocated by initialization code, not by protocol. */
+
+ /*
+ * Size of each TID buffer in host memory, starting at
+ * spi_rcv_egrbufs. The buffers are virtually contiguous.
+ */
+ __u32 spi_rcv_egrbufsize;
+ /*
+	 * The special QP (queue pair) value that distinguishes a qlogic_ib
+	 * protocol packet from standard IB packets. More, probably much
+ * more, to be added.
+ */
+ __u32 spi_qpair;
+
+ /*
+ * User register base for init code, not to be used directly by
+ * protocol or applications. Always points to chip registers,
+ * for normal or shared context.
+ */
+ __u64 spi_uregbase;
+ /*
+ * Maximum buffer size in bytes that can be used in a single TID
+ * entry (assuming the buffer is aligned to this boundary). This is
+	 * the minimum of what the hardware and software support.
+	 * Guaranteed to be a power of 2.
+ */
+ __u32 spi_tid_maxsize;
+ /*
+ * alignment of each pio send buffer (byte count
+ * to add to spi_piobufbase to get to second buffer)
+ */
+ __u32 spi_pioalign;
+ /*
+ * The index of the first pio buffer available to this process;
+ * needed to do lookup in spi_pioavailaddr; not added to
+ * spi_piobufbase.
+ */
+ __u32 spi_pioindex;
+ /* number of buffers mapped for this process */
+ __u32 spi_piocnt;
+
+ /*
+ * Base address of writeonly pio buffers for this process.
+ * Each buffer has spi_piosize words, and is aligned on spi_pioalign
+ * boundaries. spi_piocnt buffers are mapped from this address
+ */
+ __u64 spi_piobufbase;
+
+ /*
+ * Base address of readonly memory copy of the pioavail registers.
+ * There are 2 bits for each buffer.
+ */
+ __u64 spi_pioavailaddr;
+
+ /*
+ * Address where driver updates a copy of the interface and driver
+ * status (QIB_STATUS_*) as a 64 bit value. It's followed by a
+ * link status qword (formerly combined with driver status), then a
+ * string indicating hardware error, if there was one.
+ */
+ __u64 spi_status;
+
+ /* number of chip ctxts available to user processes */
+ __u32 spi_nctxts;
+ __u16 spi_unit; /* unit number of chip we are using */
+ __u16 spi_port; /* IB port number we are using */
+ /* num bufs in each contiguous set */
+ __u32 spi_rcv_egrperchunk;
+ /* size in bytes of each contiguous set */
+ __u32 spi_rcv_egrchunksize;
+ /* total size of mmap to cover full rcvegrbuffers */
+ __u32 spi_rcv_egrbuftotlen;
+ __u32 spi_rhf_offset; /* dword offset in hdrqent for rcvhdr flags */
+ /* address of readonly memory copy of the rcvhdrq tail register. */
+ __u64 spi_rcvhdr_tailaddr;
+
+ /*
+ * shared memory pages for subctxts if ctxt is shared; these cover
+ * all the processes in the group sharing a single context.
+	 * All have enough space for the num_subcontexts value on this job.
+ */
+ __u64 spi_subctxt_uregbase;
+ __u64 spi_subctxt_rcvegrbuf;
+ __u64 spi_subctxt_rcvhdr_base;
+
+ /* shared memory page for send buffer disarm status */
+ __u64 spi_sendbuf_status;
+} __attribute__ ((aligned(8)));
+
+/*
+ * This version number is given to the driver by the user code during
+ * initialization in the spu_userversion field of qib_user_info, so
+ * the driver can check for compatibility with user code.
+ *
+ * The major version changes when data structures
+ * change in an incompatible way. The driver must be the same or higher
+ * for initialization to succeed. In some cases, a higher version
+ * driver will not interoperate with older software, and initialization
+ * will return an error.
+ */
+#define QIB_USER_SWMAJOR 1
+
+/*
+ * Minor version differences are always compatible
+ * within a major version; however, if the user software is newer
+ * than driver software, some new features and/or structure fields
+ * may not be implemented; the user code must deal with this if it
+ * cares, or it must abort after initialization reports the difference.
+ */
+#define QIB_USER_SWMINOR 10
+
+#define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
+
+#ifndef QIB_KERN_TYPE
+#define QIB_KERN_TYPE 0
+#define QIB_IDSTR "QLogic kernel.org driver"
+#endif
+
+/*
+ * Similarly, this is the kernel version going back to the user. It's
+ * slightly different, in that we want to tell if the driver was built as
+ * part of a QLogic release, or from the openfabrics.org, kernel.org,
+ * or standard distribution driver, for support reasons.
+ * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
+ *
+ * It's returned by the driver to the user code during initialization in the
+ * spi_sw_version field of qib_base_info, so the user code can in turn
+ * check for compatibility with the kernel.
+ */
+#define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION)
+
+/*
+ * This structure is passed to qib_userinit() to tell the driver where
+ * user code buffers are, sizes, etc. The offsets and sizes of the
+ * fields must remain unchanged, for binary compatibility. It can
+ * be extended if needed, provided userversion is changed so user code can tell.
+ */
+struct qib_user_info {
+ /*
+ * version of user software, to detect compatibility issues.
+ * Should be set to QIB_USER_SWVERSION.
+ */
+ __u32 spu_userversion;
+
+ __u32 _spu_unused2;
+
+ /* size of struct base_info to write to */
+ __u32 spu_base_info_size;
+
+ __u32 _spu_unused3;
+
+ /*
+ * If two or more processes wish to share a context, each process
+ * must set the spu_subctxt_cnt and spu_subctxt_id to the same
+ * values. The only restriction on the spu_subctxt_id is that
+ * it be unique for a given node.
+ */
+ __u16 spu_subctxt_cnt;
+ __u16 spu_subctxt_id;
+
+ __u32 spu_port; /* IB port requested by user if > 0 */
+
+ /*
+ * address of struct base_info to write to
+ */
+ __u64 spu_base_info;
+
+} __attribute__ ((aligned(8)));
+
+/* User commands. */
+
+/* 16 available, was: old set up userspace (for old user code) */
+#define QIB_CMD_CTXT_INFO 17 /* find out what resources we got */
+#define QIB_CMD_RECV_CTRL 18 /* control receipt of packets */
+#define QIB_CMD_TID_UPDATE 19 /* update expected TID entries */
+#define QIB_CMD_TID_FREE 20 /* free expected TID entries */
+#define QIB_CMD_SET_PART_KEY 21 /* add partition key */
+/* 22 available, was: return info on slave processes (for old user code) */
+#define QIB_CMD_ASSIGN_CTXT 23 /* allocate HCA and ctxt */
+#define QIB_CMD_USER_INIT 24 /* set up userspace */
+#define QIB_CMD_UNUSED_1 25
+#define QIB_CMD_UNUSED_2 26
+#define QIB_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
+#define QIB_CMD_POLL_TYPE 28 /* set the kind of polling we want */
+#define QIB_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */
+/* 30 is unused */
+#define QIB_CMD_SDMA_INFLIGHT 31 /* sdma inflight counter request */
+#define QIB_CMD_SDMA_COMPLETE 32 /* sdma completion counter request */
+/* 33 available, was a testing feature */
+#define QIB_CMD_DISARM_BUFS 34 /* disarm send buffers w/ errors */
+#define QIB_CMD_ACK_EVENT 35 /* ack & clear bits */
+#define QIB_CMD_CPUS_LIST 36 /* list of cpus allocated, for pinned
+ * processes: qib_cpus_list */
+
+/*
+ * QIB_CMD_ACK_EVENT obsoletes QIB_CMD_DISARM_BUFS, but we keep it for
+ * compatibility with libraries from previous releases. The ACK_EVENT
+ * will take appropriate driver action (if any, just DISARM for now),
+ * then clear the bits passed in as part of the mask. These bits are
+ * in the first 64bit word at spi_sendbuf_status, and are passed to
+ * the driver in the event_mask union as well.
+ */
+#define _QIB_EVENT_DISARM_BUFS_BIT 0
+#define _QIB_EVENT_LINKDOWN_BIT 1
+#define _QIB_EVENT_LID_CHANGE_BIT 2
+#define _QIB_EVENT_LMC_CHANGE_BIT 3
+#define _QIB_EVENT_SL2VL_CHANGE_BIT 4
+#define _QIB_MAX_EVENT_BIT _QIB_EVENT_SL2VL_CHANGE_BIT
+
+#define QIB_EVENT_DISARM_BUFS_BIT (1UL << _QIB_EVENT_DISARM_BUFS_BIT)
+#define QIB_EVENT_LINKDOWN_BIT (1UL << _QIB_EVENT_LINKDOWN_BIT)
+#define QIB_EVENT_LID_CHANGE_BIT (1UL << _QIB_EVENT_LID_CHANGE_BIT)
+#define QIB_EVENT_LMC_CHANGE_BIT (1UL << _QIB_EVENT_LMC_CHANGE_BIT)
+#define QIB_EVENT_SL2VL_CHANGE_BIT (1UL << _QIB_EVENT_SL2VL_CHANGE_BIT)
+
+
+/*
+ * Poll types
+ */
+#define QIB_POLL_TYPE_ANYRCV 0x0
+#define QIB_POLL_TYPE_URGENT 0x1
+
+struct qib_ctxt_info {
+ __u16 num_active; /* number of active units */
+ __u16 unit; /* unit (chip) assigned to caller */
+ __u16 port; /* IB port assigned to caller (1-based) */
+ __u16 ctxt; /* ctxt on unit assigned to caller */
+ __u16 subctxt; /* subctxt on unit assigned to caller */
+ __u16 num_ctxts; /* number of ctxts available on unit */
+ __u16 num_subctxts; /* number of subctxts opened on ctxt */
+ __u16 rec_cpu; /* cpu # for affinity (ffff if none) */
+};
+
+struct qib_tid_info {
+ __u32 tidcnt;
+ /* make structure same size in 32 and 64 bit */
+ __u32 tid__unused;
+ /* virtual address of first page in transfer */
+ __u64 tidvaddr;
+ /* pointer (same size 32/64 bit) to __u16 tid array */
+ __u64 tidlist;
+
+ /*
+ * pointer (same size 32/64 bit) to bitmap of TIDs used
+ * for this call; checked for being large enough at open
+ */
+ __u64 tidmap;
+};
+
+struct qib_cmd {
+ __u32 type; /* command type */
+ union {
+ struct qib_tid_info tid_info;
+ struct qib_user_info user_info;
+
+ /*
+ * address in userspace where we should put the sdma
+ * inflight counter
+ */
+ __u64 sdma_inflight;
+ /*
+ * address in userspace where we should put the sdma
+ * completion counter
+ */
+ __u64 sdma_complete;
+ /* address in userspace of struct qib_ctxt_info to
+ write result to */
+ __u64 ctxt_info;
+ /* enable/disable receipt of packets */
+ __u32 recv_ctrl;
+ /* enable/disable armlaunch errors (non-zero to enable) */
+ __u32 armlaunch_ctrl;
+ /* partition key to set */
+ __u16 part_key;
+ /* user address of __u32 bitmask of active slaves */
+ __u64 slave_mask_addr;
+ /* type of polling we want */
+ __u16 poll_type;
+ /* back pressure enable bit for one particular context */
+ __u8 ctxt_bp;
+ /* qib_user_event_ack(), IPATH_EVENT_* bits */
+ __u64 event_mask;
+ } cmd;
+};
+
+struct qib_iovec {
+ /* Pointer to data, but same size 32 and 64 bit */
+ __u64 iov_base;
+
+ /*
+ * Length of data; don't need 64 bits, but want
+ * qib_sendpkt to remain the same size as before the 32-bit changes, so...
+ */
+ __u64 iov_len;
+};
+
+/*
+ * Describes a single packet for send. Each packet can have one or more
+ * buffers, but the total length (exclusive of IB headers) must be less
+ * than the MTU, and if using the PIO method, the entire packet length,
+ * including IB headers, must be less than the qib_piosize value (words).
+ * Use of this necessitates including sys/uio.h.
+ */
+struct __qib_sendpkt {
+ __u32 sps_flags; /* flags for packet (TBD) */
+ __u32 sps_cnt; /* number of entries to use in sps_iov */
+ /* array of iov's describing packet. TEMPORARY */
+ struct qib_iovec sps_iov[4];
+};
+
+/*
+ * Diagnostics can send a packet by "writing" the following struct
+ * to the diag data special file. This allows a custom pbc
+ * (+ static rate) qword, so that special modes and deliberate
+ * changes to CRCs can be used. The elements were also re-ordered
+ * for better alignment and to avoid padding issues.
+ */
+#define _DIAG_XPKT_VERS 3
+struct qib_diag_xpkt {
+ __u16 version;
+ __u16 unit;
+ __u16 port;
+ __u16 len;
+ __u64 data;
+ __u64 pbc_wd;
+};
+
+/*
+ * Data layout in I2C flash (for GUID, etc.)
+ * All fields are little-endian binary unless otherwise stated
+ */
+#define QIB_FLASH_VERSION 2
+struct qib_flash {
+ /* flash layout version (QIB_FLASH_VERSION) */
+ __u8 if_fversion;
+ /* checksum protecting if_length bytes */
+ __u8 if_csum;
+ /*
+ * valid length (in use, protected by if_csum), including
+ * if_fversion and if_csum themselves
+ */
+ __u8 if_length;
+ /* the GUID, in network order */
+ __u8 if_guid[8];
+ /* number of GUIDs to use, starting from if_guid */
+ __u8 if_numguid;
+ /* the (last 10 characters of) board serial number, in ASCII */
+ char if_serial[12];
+ /* board mfg date (YYYYMMDD ASCII) */
+ char if_mfgdate[8];
+ /* last board rework/test date (YYYYMMDD ASCII) */
+ char if_testdate[8];
+ /* logging of error counts, TBD */
+ __u8 if_errcntp[4];
+ /* powered on hours, updated at driver unload */
+ __u8 if_powerhour[2];
+ /* ASCII free-form comment field */
+ char if_comment[32];
+ /* Backwards compatible prefix for longer QLogic Serial Numbers */
+ char if_sprefix[4];
+ /* 82 bytes used, min flash size is 128 bytes */
+ __u8 if_future[46];
+};
+
+/*
+ * These are the counters implemented in the chip, and are listed in order.
+ * The InterCaps naming is taken straight from the chip spec.
+ */
+struct qlogic_ib_counters {
+ __u64 LBIntCnt;
+ __u64 LBFlowStallCnt;
+ __u64 TxSDmaDescCnt; /* was Reserved1 */
+ __u64 TxUnsupVLErrCnt;
+ __u64 TxDataPktCnt;
+ __u64 TxFlowPktCnt;
+ __u64 TxDwordCnt;
+ __u64 TxLenErrCnt;
+ __u64 TxMaxMinLenErrCnt;
+ __u64 TxUnderrunCnt;
+ __u64 TxFlowStallCnt;
+ __u64 TxDroppedPktCnt;
+ __u64 RxDroppedPktCnt;
+ __u64 RxDataPktCnt;
+ __u64 RxFlowPktCnt;
+ __u64 RxDwordCnt;
+ __u64 RxLenErrCnt;
+ __u64 RxMaxMinLenErrCnt;
+ __u64 RxICRCErrCnt;
+ __u64 RxVCRCErrCnt;
+ __u64 RxFlowCtrlErrCnt;
+ __u64 RxBadFormatCnt;
+ __u64 RxLinkProblemCnt;
+ __u64 RxEBPCnt;
+ __u64 RxLPCRCErrCnt;
+ __u64 RxBufOvflCnt;
+ __u64 RxTIDFullErrCnt;
+ __u64 RxTIDValidErrCnt;
+ __u64 RxPKeyMismatchCnt;
+ __u64 RxP0HdrEgrOvflCnt;
+ __u64 RxP1HdrEgrOvflCnt;
+ __u64 RxP2HdrEgrOvflCnt;
+ __u64 RxP3HdrEgrOvflCnt;
+ __u64 RxP4HdrEgrOvflCnt;
+ __u64 RxP5HdrEgrOvflCnt;
+ __u64 RxP6HdrEgrOvflCnt;
+ __u64 RxP7HdrEgrOvflCnt;
+ __u64 RxP8HdrEgrOvflCnt;
+ __u64 RxP9HdrEgrOvflCnt;
+ __u64 RxP10HdrEgrOvflCnt;
+ __u64 RxP11HdrEgrOvflCnt;
+ __u64 RxP12HdrEgrOvflCnt;
+ __u64 RxP13HdrEgrOvflCnt;
+ __u64 RxP14HdrEgrOvflCnt;
+ __u64 RxP15HdrEgrOvflCnt;
+ __u64 RxP16HdrEgrOvflCnt;
+ __u64 IBStatusChangeCnt;
+ __u64 IBLinkErrRecoveryCnt;
+ __u64 IBLinkDownedCnt;
+ __u64 IBSymbolErrCnt;
+ __u64 RxVL15DroppedPktCnt;
+ __u64 RxOtherLocalPhyErrCnt;
+ __u64 PcieRetryBufDiagQwordCnt;
+ __u64 ExcessBufferOvflCnt;
+ __u64 LocalLinkIntegrityErrCnt;
+ __u64 RxVlErrCnt;
+ __u64 RxDlidFltrCnt;
+};
+
+/*
+ * The next set of defines are for packet headers, and chip register
+ * and memory bits that are visible to and/or used by user-mode software.
+ */
+
+/* RcvHdrFlags bits */
+#define QLOGIC_IB_RHF_LENGTH_MASK 0x7FF
+#define QLOGIC_IB_RHF_LENGTH_SHIFT 0
+#define QLOGIC_IB_RHF_RCVTYPE_MASK 0x7
+#define QLOGIC_IB_RHF_RCVTYPE_SHIFT 11
+#define QLOGIC_IB_RHF_EGRINDEX_MASK 0xFFF
+#define QLOGIC_IB_RHF_EGRINDEX_SHIFT 16
+#define QLOGIC_IB_RHF_SEQ_MASK 0xF
+#define QLOGIC_IB_RHF_SEQ_SHIFT 0
+#define QLOGIC_IB_RHF_HDRQ_OFFSET_MASK 0x7FF
+#define QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT 4
+#define QLOGIC_IB_RHF_H_ICRCERR 0x80000000
+#define QLOGIC_IB_RHF_H_VCRCERR 0x40000000
+#define QLOGIC_IB_RHF_H_PARITYERR 0x20000000
+#define QLOGIC_IB_RHF_H_LENERR 0x10000000
+#define QLOGIC_IB_RHF_H_MTUERR 0x08000000
+#define QLOGIC_IB_RHF_H_IHDRERR 0x04000000
+#define QLOGIC_IB_RHF_H_TIDERR 0x02000000
+#define QLOGIC_IB_RHF_H_MKERR 0x01000000
+#define QLOGIC_IB_RHF_H_IBERR 0x00800000
+#define QLOGIC_IB_RHF_H_ERR_MASK 0xFF800000
+#define QLOGIC_IB_RHF_L_USE_EGR 0x80000000
+#define QLOGIC_IB_RHF_L_SWA 0x00008000
+#define QLOGIC_IB_RHF_L_SWB 0x00004000
+
+/* qlogic_ib header fields */
+#define QLOGIC_IB_I_VERS_MASK 0xF
+#define QLOGIC_IB_I_VERS_SHIFT 28
+#define QLOGIC_IB_I_CTXT_MASK 0xF
+#define QLOGIC_IB_I_CTXT_SHIFT 24
+#define QLOGIC_IB_I_TID_MASK 0x7FF
+#define QLOGIC_IB_I_TID_SHIFT 13
+#define QLOGIC_IB_I_OFFSET_MASK 0x1FFF
+#define QLOGIC_IB_I_OFFSET_SHIFT 0
+
+/* K_PktFlags bits */
+#define QLOGIC_IB_KPF_INTR 0x1
+#define QLOGIC_IB_KPF_SUBCTXT_MASK 0x3
+#define QLOGIC_IB_KPF_SUBCTXT_SHIFT 1
+
+#define QLOGIC_IB_MAX_SUBCTXT 4
+
+/* SendPIO per-buffer control */
+#define QLOGIC_IB_SP_TEST 0x40
+#define QLOGIC_IB_SP_TESTEBP 0x20
+#define QLOGIC_IB_SP_TRIGGER_SHIFT 15
+
+/* SendPIOAvail bits */
+#define QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT 1
+#define QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT 0
+
+/* qlogic_ib header format */
+struct qib_header {
+ /*
+ * Version - 4 bits, Context - 4 bits, TID - 10 bits and Offset -
+ * 14 bits before ECO change ~28 Dec 03. After that, Vers 4,
+ * Context 4, TID 11, offset 13.
+ */
+ __le32 ver_ctxt_tid_offset;
+ __le16 chksum;
+ __le16 pkt_flags;
+};
+
+/*
+ * qlogic_ib user message header format.
+ * This structure contains the first 4 fields common to all protocols
+ * that employ qlogic_ib.
+ */
+struct qib_message_header {
+ __be16 lrh[4];
+ __be32 bth[3];
+ /* fields below this point are in host byte order */
+ struct qib_header iph;
+ __u8 sub_opcode;
+};
+
+/* IB - LRH header consts */
+#define QIB_LRH_GRH 0x0003 /* 1. word of IB LRH - next header: GRH */
+#define QIB_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */
+
+/* misc. */
+#define SIZE_OF_CRC 1
+
+#define QIB_DEFAULT_P_KEY 0xFFFF
+#define QIB_PERMISSIVE_LID 0xFFFF
+#define QIB_AETH_CREDIT_SHIFT 24
+#define QIB_AETH_CREDIT_MASK 0x1F
+#define QIB_AETH_CREDIT_INVAL 0x1F
+#define QIB_PSN_MASK 0xFFFFFF
+#define QIB_MSN_MASK 0xFFFFFF
+#define QIB_QPN_MASK 0xFFFFFF
+#define QIB_MULTICAST_LID_BASE 0xC000
+#define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK
+#define QIB_MULTICAST_QPN 0xFFFFFF
+
+/* Receive Header Queue: receive type (from qlogic_ib) */
+#define RCVHQ_RCV_TYPE_EXPECTED 0
+#define RCVHQ_RCV_TYPE_EAGER 1
+#define RCVHQ_RCV_TYPE_NON_KD 2
+#define RCVHQ_RCV_TYPE_ERROR 3
+
+#define QIB_HEADER_QUEUE_WORDS 9
+
+/* Functions for extracting fields from rcvhdrq entries for the driver. */
+static inline __u32 qib_hdrget_err_flags(const __le32 *rbuf)
+{
+ return __le32_to_cpu(rbuf[1]) & QLOGIC_IB_RHF_H_ERR_MASK;
+}
+
+static inline __u32 qib_hdrget_rcv_type(const __le32 *rbuf)
+{
+ return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_RCVTYPE_SHIFT) &
+ QLOGIC_IB_RHF_RCVTYPE_MASK;
+}
+
+static inline __u32 qib_hdrget_length_in_bytes(const __le32 *rbuf)
+{
+ return ((__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_LENGTH_SHIFT) &
+ QLOGIC_IB_RHF_LENGTH_MASK) << 2;
+}
+
+static inline __u32 qib_hdrget_index(const __le32 *rbuf)
+{
+ return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_EGRINDEX_SHIFT) &
+ QLOGIC_IB_RHF_EGRINDEX_MASK;
+}
+
+static inline __u32 qib_hdrget_seq(const __le32 *rbuf)
+{
+ return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_SEQ_SHIFT) &
+ QLOGIC_IB_RHF_SEQ_MASK;
+}
+
+static inline __u32 qib_hdrget_offset(const __le32 *rbuf)
+{
+ return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT) &
+ QLOGIC_IB_RHF_HDRQ_OFFSET_MASK;
+}
+
+static inline __u32 qib_hdrget_use_egr_buf(const __le32 *rbuf)
+{
+ return __le32_to_cpu(rbuf[0]) & QLOGIC_IB_RHF_L_USE_EGR;
+}
+
+static inline __u32 qib_hdrget_qib_ver(__le32 hdrword)
+{
+ return (__le32_to_cpu(hdrword) >> QLOGIC_IB_I_VERS_SHIFT) &
+ QLOGIC_IB_I_VERS_MASK;
+}
+
+#endif /* _QIB_COMMON_H */
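
The version handshake above is two-way: user code advertises QIB_USER_SWVERSION in spu_userversion, and the driver answers with QIB_KERN_SWVERSION in the spi_sw_version field of qib_base_info. A minimal sketch of the user-side check of that reply (the helper name is illustrative only, not part of this header; it assumes the definitions above are in scope):

        /* Sketch: accept the driver if its major version matches ours. */
        static inline int qib_sw_version_compatible(__u32 spi_sw_version)
        {
                /* Bit 31 is QIB_KERN_TYPE; bits 30:16 carry the kernel's major. */
                __u32 kern_major = (spi_sw_version >> 16) & 0x7fff;

                /* Majors must match; minor differences are tolerated (see above). */
                return kern_major == QIB_USER_SWMAJOR;
        }

Minor-version skew can still be inspected separately (spi_sw_version & 0xffff) when user code needs to know whether an optional feature is present.
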
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
new file mode 100644
index 000000000000..a86cbf880f98
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_cq.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "qib_verbs.h"
+
+/**
+ * qib_cq_enter - add a new entry to the completion queue
+ * @cq: completion queue
+ * @entry: work completion entry to add
+ * @solicited: true if @entry is a solicited entry
+ *
+ * This may be called with qp->s_lock held.
+ */
+void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
+{
+ struct qib_cq_wc *wc;
+ unsigned long flags;
+ u32 head;
+ u32 next;
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ /*
+ * Note that the head pointer might be writable by user processes.
+ * Take care to verify it is a sane value.
+ */
+ wc = cq->queue;
+ head = wc->head;
+ if (head >= (unsigned) cq->ibcq.cqe) {
+ head = cq->ibcq.cqe;
+ next = 0;
+ } else
+ next = head + 1;
+ if (unlikely(next == wc->tail)) {
+ spin_unlock_irqrestore(&cq->lock, flags);
+ if (cq->ibcq.event_handler) {
+ struct ib_event ev;
+
+ ev.device = cq->ibcq.device;
+ ev.element.cq = &cq->ibcq;
+ ev.event = IB_EVENT_CQ_ERR;
+ cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
+ }
+ return;
+ }
+ if (cq->ip) {
+ wc->uqueue[head].wr_id = entry->wr_id;
+ wc->uqueue[head].status = entry->status;
+ wc->uqueue[head].opcode = entry->opcode;
+ wc->uqueue[head].vendor_err = entry->vendor_err;
+ wc->uqueue[head].byte_len = entry->byte_len;
+ wc->uqueue[head].ex.imm_data =
+ (__u32 __force)entry->ex.imm_data;
+ wc->uqueue[head].qp_num = entry->qp->qp_num;
+ wc->uqueue[head].src_qp = entry->src_qp;
+ wc->uqueue[head].wc_flags = entry->wc_flags;
+ wc->uqueue[head].pkey_index = entry->pkey_index;
+ wc->uqueue[head].slid = entry->slid;
+ wc->uqueue[head].sl = entry->sl;
+ wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
+ wc->uqueue[head].port_num = entry->port_num;
+ /* Make sure entry is written before the head index. */
+ smp_wmb();
+ } else
+ wc->kqueue[head] = *entry;
+ wc->head = next;
+
+ if (cq->notify == IB_CQ_NEXT_COMP ||
+ (cq->notify == IB_CQ_SOLICITED && solicited)) {
+ cq->notify = IB_CQ_NONE;
+ cq->triggered++;
+ /*
+ * This will cause send_complete() to be called in
+ * another thread.
+ */
+ queue_work(qib_cq_wq, &cq->comptask);
+ }
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+}
+
+/**
+ * qib_poll_cq - poll for work completion entries
+ * @ibcq: the completion queue to poll
+ * @num_entries: the maximum number of entries to return
+ * @entry: pointer to array where work completions are placed
+ *
+ * Returns the number of completion entries polled.
+ *
+ * This may be called from interrupt context. Also called by ib_poll_cq()
+ * in the generic verbs code.
+ */
+int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
+{
+ struct qib_cq *cq = to_icq(ibcq);
+ struct qib_cq_wc *wc;
+ unsigned long flags;
+ int npolled;
+ u32 tail;
+
+ /* The kernel can only poll a kernel completion queue */
+ if (cq->ip) {
+ npolled = -EINVAL;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ wc = cq->queue;
+ tail = wc->tail;
+ if (tail > (u32) cq->ibcq.cqe)
+ tail = (u32) cq->ibcq.cqe;
+ for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
+ if (tail == wc->head)
+ break;
+ /* The kernel doesn't need a RMB since it has the lock. */
+ *entry = wc->kqueue[tail];
+ if (tail >= cq->ibcq.cqe)
+ tail = 0;
+ else
+ tail++;
+ }
+ wc->tail = tail;
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+bail:
+ return npolled;
+}
+
+static void send_complete(struct work_struct *work)
+{
+ struct qib_cq *cq = container_of(work, struct qib_cq, comptask);
+
+ /*
+ * The completion handler will most likely rearm the notification
+ * and poll for all pending entries. If a new completion entry
+ * is added while we are in this routine, queue_work()
+ * won't call us again until we return, so we check triggered to
+ * see if we need to call the handler again.
+ */
+ for (;;) {
+ u8 triggered = cq->triggered;
+
+ /*
+ * IPoIB connected mode assumes the callback is from a
+ * soft IRQ. We simulate this by blocking "bottom halves".
+ * See the implementation for ipoib_cm_handle_tx_wc(),
+ * netif_tx_lock_bh() and netif_tx_lock().
+ */
+ local_bh_disable();
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+ local_bh_enable();
+
+ if (cq->triggered == triggered)
+ return;
+ }
+}
+
+/**
+ * qib_create_cq - create a completion queue
+ * @ibdev: the device this completion queue is attached to
+ * @entries: the minimum size of the completion queue
+ * @comp_vector: unused by the QLogic_IB driver
+ * @context: unused by the QLogic_IB driver
+ * @udata: user data for libibverbs.so
+ *
+ * Returns a pointer to the completion queue or negative errno values
+ * for failure.
+ *
+ * Called by ib_create_cq() in the generic verbs code.
+ */
+struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
+ int comp_vector, struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_cq *cq;
+ struct qib_cq_wc *wc;
+ struct ib_cq *ret;
+ u32 sz;
+
+ if (entries < 1 || entries > ib_qib_max_cqes) {
+ ret = ERR_PTR(-EINVAL);
+ goto done;
+ }
+
+ /* Allocate the completion queue structure. */
+ cq = kmalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto done;
+ }
+
+ /*
+ * Allocate the completion queue entries and head/tail pointers.
+ * This is allocated separately so that it can be resized and
+ * also mapped into user space.
+ * We need to use vmalloc() in order to support mmap and large
+ * numbers of entries.
+ */
+ sz = sizeof(*wc);
+ if (udata && udata->outlen >= sizeof(__u64))
+ sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
+ else
+ sz += sizeof(struct ib_wc) * (entries + 1);
+ wc = vmalloc_user(sz);
+ if (!wc) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_cq;
+ }
+
+ /*
+ * Return the address of the WC as the offset to mmap.
+ * See qib_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ int err;
+
+ cq->ip = qib_create_mmap_info(dev, sz, context, wc);
+ if (!cq->ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_wc;
+ }
+
+ err = ib_copy_to_udata(udata, &cq->ip->offset,
+ sizeof(cq->ip->offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ } else
+ cq->ip = NULL;
+
+ spin_lock(&dev->n_cqs_lock);
+ if (dev->n_cqs_allocated == ib_qib_max_cqs) {
+ spin_unlock(&dev->n_cqs_lock);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ dev->n_cqs_allocated++;
+ spin_unlock(&dev->n_cqs_lock);
+
+ if (cq->ip) {
+ spin_lock_irq(&dev->pending_lock);
+ list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
+ /*
+ * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
+ * The number of entries should be >= the number requested, or an
+ * error is returned.
+ */
+ cq->ibcq.cqe = entries;
+ cq->notify = IB_CQ_NONE;
+ cq->triggered = 0;
+ spin_lock_init(&cq->lock);
+ INIT_WORK(&cq->comptask, send_complete);
+ wc->head = 0;
+ wc->tail = 0;
+ cq->queue = wc;
+
+ ret = &cq->ibcq;
+
+ goto done;
+
+bail_ip:
+ kfree(cq->ip);
+bail_wc:
+ vfree(wc);
+bail_cq:
+ kfree(cq);
+done:
+ return ret;
+}
+
+/**
+ * qib_destroy_cq - destroy a completion queue
+ * @ibcq: the completion queue to destroy.
+ *
+ * Returns 0 for success.
+ *
+ * Called by ib_destroy_cq() in the generic verbs code.
+ */
+int qib_destroy_cq(struct ib_cq *ibcq)
+{
+ struct qib_ibdev *dev = to_idev(ibcq->device);
+ struct qib_cq *cq = to_icq(ibcq);
+
+ flush_work(&cq->comptask);
+ spin_lock(&dev->n_cqs_lock);
+ dev->n_cqs_allocated--;
+ spin_unlock(&dev->n_cqs_lock);
+ if (cq->ip)
+ kref_put(&cq->ip->ref, qib_release_mmap_info);
+ else
+ vfree(cq->queue);
+ kfree(cq);
+
+ return 0;
+}
+
+/**
+ * qib_req_notify_cq - change the notification type for a completion queue
+ * @ibcq: the completion queue
+ * @notify_flags: the type of notification to request
+ *
+ * Returns 0 for success.
+ *
+ * This may be called from interrupt context. Also called by
+ * ib_req_notify_cq() in the generic verbs code.
+ */
+int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
+{
+ struct qib_cq *cq = to_icq(ibcq);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cq->lock, flags);
+ /*
+ * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
+ * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
+ */
+ if (cq->notify != IB_CQ_NEXT_COMP)
+ cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
+
+ if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ cq->queue->head != cq->queue->tail)
+ ret = 1;
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ return ret;
+}
+
+/**
+ * qib_resize_cq - change the size of the CQ
+ * @ibcq: the completion queue
+ * @cqe: the new size of the completion queue
+ * @udata: user data for libibverbs.so
+ *
+ * Returns 0 for success.
+ */
+int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+{
+ struct qib_cq *cq = to_icq(ibcq);
+ struct qib_cq_wc *old_wc;
+ struct qib_cq_wc *wc;
+ u32 head, tail, n;
+ int ret;
+ u32 sz;
+
+ if (cqe < 1 || cqe > ib_qib_max_cqes) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /*
+ * Need to use vmalloc() if we want to support large #s of entries.
+ */
+ sz = sizeof(*wc);
+ if (udata && udata->outlen >= sizeof(__u64))
+ sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
+ else
+ sz += sizeof(struct ib_wc) * (cqe + 1);
+ wc = vmalloc_user(sz);
+ if (!wc) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ /* Check that we can write the offset to mmap. */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ __u64 offset = 0;
+
+ ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
+ if (ret)
+ goto bail_free;
+ }
+
+ spin_lock_irq(&cq->lock);
+ /*
+ * Make sure head and tail are sane since they
+ * might be user writable.
+ */
+ old_wc = cq->queue;
+ head = old_wc->head;
+ if (head > (u32) cq->ibcq.cqe)
+ head = (u32) cq->ibcq.cqe;
+ tail = old_wc->tail;
+ if (tail > (u32) cq->ibcq.cqe)
+ tail = (u32) cq->ibcq.cqe;
+ if (head < tail)
+ n = cq->ibcq.cqe + 1 + head - tail;
+ else
+ n = head - tail;
+ if (unlikely((u32)cqe < n)) {
+ ret = -EINVAL;
+ goto bail_unlock;
+ }
+ for (n = 0; tail != head; n++) {
+ if (cq->ip)
+ wc->uqueue[n] = old_wc->uqueue[tail];
+ else
+ wc->kqueue[n] = old_wc->kqueue[tail];
+ if (tail == (u32) cq->ibcq.cqe)
+ tail = 0;
+ else
+ tail++;
+ }
+ cq->ibcq.cqe = cqe;
+ wc->head = n;
+ wc->tail = 0;
+ cq->queue = wc;
+ spin_unlock_irq(&cq->lock);
+
+ vfree(old_wc);
+
+ if (cq->ip) {
+ struct qib_ibdev *dev = to_idev(ibcq->device);
+ struct qib_mmap_info *ip = cq->ip;
+
+ qib_update_mmap_info(dev, ip, sz, wc);
+
+ /*
+ * Return the offset to mmap.
+ * See qib_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ ret = ib_copy_to_udata(udata, &ip->offset,
+ sizeof(ip->offset));
+ if (ret)
+ goto bail;
+ }
+
+ spin_lock_irq(&dev->pending_lock);
+ if (list_empty(&ip->pending_mmaps))
+ list_add(&ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
+ ret = 0;
+ goto bail;
+
+bail_unlock:
+ spin_unlock_irq(&cq->lock);
+bail_free:
+ vfree(wc);
+bail:
+ return ret;
+}
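
The completion queue above is a ring of cqe + 1 slots whose head and tail indices can live in user-mapped memory, so qib_cq_enter(), qib_poll_cq() and qib_resize_cq() all clamp them before use; the occupancy is then head - tail modulo cqe + 1. A standalone sketch of that occupancy computation, for illustration only (it mirrors the check qib_resize_cq() makes before copying entries):

        /* Entries queued in a ring of (cqe + 1) slots; head == tail means empty. */
        static u32 cq_ring_used(u32 head, u32 tail, u32 cqe)
        {
                /* Clamp untrusted (possibly user-writable) indices, as the driver does. */
                if (head > cqe)
                        head = cqe;
                if (tail > cqe)
                        tail = cqe;
                return (head >= tail) ? head - tail : cqe + 1 + head - tail;
        }
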
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
new file mode 100644
index 000000000000..ca98dd523752
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -0,0 +1,894 @@
+/*
+ * Copyright (c) 2010 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file contains support for diagnostic functions. It is accessed by
+ * opening the qib_diag device, normally minor number 129. Diagnostic use
+ * of the QLogic_IB chip may render the chip or board unusable until the
+ * driver is unloaded, or in some cases, until the system is rebooted.
+ *
+ * Accesses to the chip through this interface are not similar to going
+ * through the /sys/bus/pci resource mmap interface.
+ */
+
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+/*
+ * Each client that opens the diag device must read then write
+ * offset 0, to prevent lossage from random cat or od. diag_state
+ * sequences this "handshake".
+ */
+enum diag_state { UNUSED = 0, OPENED, INIT, READY };
+
+/* State for an individual client. PID so children cannot abuse handshake */
+static struct qib_diag_client {
+ struct qib_diag_client *next;
+ struct qib_devdata *dd;
+ pid_t pid;
+ enum diag_state state;
+} *client_pool;
+
+/*
+ * Get a client struct. Recycled if possible, else kmalloc.
+ * Must be called with qib_mutex held
+ */
+static struct qib_diag_client *get_client(struct qib_devdata *dd)
+{
+ struct qib_diag_client *dc;
+
+ dc = client_pool;
+ if (dc)
+ /* got from pool; remove it and use */
+ client_pool = dc->next;
+ else
+ /* None in pool, alloc and init */
+ dc = kmalloc(sizeof *dc, GFP_KERNEL);
+
+ if (dc) {
+ dc->next = NULL;
+ dc->dd = dd;
+ dc->pid = current->pid;
+ dc->state = OPENED;
+ }
+ return dc;
+}
+
+/*
+ * Return to pool. Must be called with qib_mutex held
+ */
+static void return_client(struct qib_diag_client *dc)
+{
+ struct qib_devdata *dd = dc->dd;
+ struct qib_diag_client *tdc, *rdc;
+
+ rdc = NULL;
+ if (dc == dd->diag_client) {
+ dd->diag_client = dc->next;
+ rdc = dc;
+ } else {
+ tdc = dc->dd->diag_client;
+ while (tdc) {
+ if (dc == tdc->next) {
+ tdc->next = dc->next;
+ rdc = dc;
+ break;
+ }
+ tdc = tdc->next;
+ }
+ }
+ if (rdc) {
+ rdc->state = UNUSED;
+ rdc->dd = NULL;
+ rdc->pid = 0;
+ rdc->next = client_pool;
+ client_pool = rdc;
+ }
+}
+
+static int qib_diag_open(struct inode *in, struct file *fp);
+static int qib_diag_release(struct inode *in, struct file *fp);
+static ssize_t qib_diag_read(struct file *fp, char __user *data,
+ size_t count, loff_t *off);
+static ssize_t qib_diag_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off);
+
+static const struct file_operations diag_file_ops = {
+ .owner = THIS_MODULE,
+ .write = qib_diag_write,
+ .read = qib_diag_read,
+ .open = qib_diag_open,
+ .release = qib_diag_release
+};
+
+static atomic_t diagpkt_count = ATOMIC_INIT(0);
+static struct cdev *diagpkt_cdev;
+static struct device *diagpkt_device;
+
+static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off);
+
+static const struct file_operations diagpkt_file_ops = {
+ .owner = THIS_MODULE,
+ .write = qib_diagpkt_write,
+};
+
+int qib_diag_add(struct qib_devdata *dd)
+{
+ char name[16];
+ int ret = 0;
+
+ if (atomic_inc_return(&diagpkt_count) == 1) {
+ ret = qib_cdev_init(QIB_DIAGPKT_MINOR, "ipath_diagpkt",
+ &diagpkt_file_ops, &diagpkt_cdev,
+ &diagpkt_device);
+ if (ret)
+ goto done;
+ }
+
+ snprintf(name, sizeof(name), "ipath_diag%d", dd->unit);
+ ret = qib_cdev_init(QIB_DIAG_MINOR_BASE + dd->unit, name,
+ &diag_file_ops, &dd->diag_cdev,
+ &dd->diag_device);
+done:
+ return ret;
+}
+
+static void qib_unregister_observers(struct qib_devdata *dd);
+
+void qib_diag_remove(struct qib_devdata *dd)
+{
+ struct qib_diag_client *dc;
+
+ if (atomic_dec_and_test(&diagpkt_count))
+ qib_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);
+
+ qib_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
+
+ /*
+ * Return all diag_clients of this device. There should be none,
+ * as we are "guaranteed" that no clients are still open
+ */
+ while (dd->diag_client)
+ return_client(dd->diag_client);
+
+ /* Now clean up all unused client structs */
+ while (client_pool) {
+ dc = client_pool;
+ client_pool = dc->next;
+ kfree(dc);
+ }
+ /* Clean up observer list */
+ qib_unregister_observers(dd);
+}
+
+/* qib_remap_ioaddr32 - remap an offset into chip address space to __iomem *
+ *
+ * @dd: the qlogic_ib device
+ * @offs: the offset in chip-space
+ * @cntp: Pointer to max (byte) count for transfer starting at offset
+ * This returns a u32 __iomem * so it can be used for both 64 and 32-bit
+ * mapping. It is needed because with the use of PAT for control of
+ * write-combining, the logically contiguous address-space of the chip
+ * may be split into virtually non-contiguous spaces, with different
+ * attributes, which are then mapped to contiguous physical space
+ * based from the first BAR.
+ *
+ * The code below makes the same assumptions as were made in
+ * init_chip_wc_pat() (qib_init.c), copied here:
+ * Assumes chip address space looks like:
+ * - kregs + sregs + cregs + uregs (in any order)
+ * - piobufs (2K and 4K bufs in either order)
+ * or:
+ * - kregs + sregs + cregs (in any order)
+ * - piobufs (2K and 4K bufs in either order)
+ * - uregs
+ *
+ * If cntp is non-NULL, returns how many bytes from offset can be accessed
+ * Returns 0 if the offset is not mapped.
+ */
+static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
+ u32 *cntp)
+{
+ u32 kreglen;
+ u32 snd_bottom, snd_lim = 0;
+ u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
+ u32 __iomem *map = NULL;
+ u32 cnt = 0;
+
+ /* First, simplest case, offset is within the first map. */
+ kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
+ if (offset < kreglen) {
+ map = krb32 + (offset / sizeof(u32));
+ cnt = kreglen - offset;
+ goto mapped;
+ }
+
+ /*
+ * Next check for user regs, the next most common case,
+ * and a cheap check because if they are not in the first map
+ * they are last in the chip.
+ */
+ if (dd->userbase) {
+ /* If user regs mapped, they are after send, so set limit. */
+ u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
+ snd_lim = dd->uregbase;
+ krb32 = (u32 __iomem *)dd->userbase;
+ if (offset >= dd->uregbase && offset < ulim) {
+ map = krb32 + (offset - dd->uregbase) / sizeof(u32);
+ cnt = ulim - offset;
+ goto mapped;
+ }
+ }
+
+ /*
+ * Lastly, check for offset within Send Buffers.
+ * This is gnarly because struct devdata is deliberately vague
+ * about things like 7322 VL15 buffers, and we are not in
+ * chip-specific code here, so should not make many assumptions.
+ * The one we _do_ make is that the only chip that has more sndbufs
+ * than we admit is the 7322, and it has userregs above that, so
+ * we know the snd_lim.
+ */
+ /* Assume 2K buffers are first. */
+ snd_bottom = dd->pio2k_bufbase;
+ if (snd_lim == 0) {
+ u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
+ snd_lim = snd_bottom + tot2k;
+ }
+ /* If 4k buffers exist, account for them by bumping
+ * appropriate limit.
+ */
+ if (dd->piobcnt4k) {
+ u32 tot4k = dd->piobcnt4k * dd->align4k;
+ u32 offs4k = dd->piobufbase >> 32;
+ if (snd_bottom > offs4k)
+ snd_bottom = offs4k;
+ else {
+ /* 4k above 2k. Bump snd_lim, if needed */
+ if (!dd->userbase)
+ snd_lim = offs4k + tot4k;
+ }
+ }
+ /*
+ * Judgement call: can we ignore the space between SendBuffs and
+ * UserRegs, where we would like to see vl15 buffs, but not more?
+ */
+ if (offset >= snd_bottom && offset < snd_lim) {
+ offset -= snd_bottom;
+ map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32));
+ cnt = snd_lim - offset;
+ }
+
+mapped:
+ if (cntp)
+ *cntp = cnt;
+ return map;
+}
+
+/*
+ * qib_read_umem64 - read a 64-bit quantity from the chip into user space
+ * @dd: the qlogic_ib device
+ * @uaddr: the location to store the data in user memory
+ * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
+ * @count: number of bytes to copy (multiple of 32 bits)
+ *
+ * This function also localizes all chip memory accesses.
+ * The copy should be written such that we read full cacheline packets
+ * from the chip. This is usually used for a single qword
+ *
+ * NOTE: This assumes the chip address is 64-bit aligned.
+ */
+static int qib_read_umem64(struct qib_devdata *dd, void __user *uaddr,
+ u32 regoffs, size_t count)
+{
+ const u64 __iomem *reg_addr;
+ const u64 __iomem *reg_end;
+ u32 limit;
+ int ret;
+
+ reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
+ if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (count >= limit)
+ count = limit;
+ reg_end = reg_addr + (count / sizeof(u64));
+
+ /* not very efficient, but it works for now */
+ while (reg_addr < reg_end) {
+ u64 data = readq(reg_addr);
+
+ if (copy_to_user(uaddr, &data, sizeof(u64))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ reg_addr++;
+ uaddr += sizeof(u64);
+ }
+ ret = 0;
+bail:
+ return ret;
+}
+
+/*
+ * qib_write_umem64 - write a 64-bit quantity to the chip from user space
+ * @dd: the qlogic_ib device
+ * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
+ * @uaddr: the source of the data in user memory
+ * @count: the number of bytes to copy (multiple of 32 bits)
+ *
+ * This is usually used for a single qword
+ * NOTE: This assumes the chip address is 64-bit aligned.
+ */
+
+static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
+ const void __user *uaddr, size_t count)
+{
+ u64 __iomem *reg_addr;
+ const u64 __iomem *reg_end;
+ u32 limit;
+ int ret;
+
+ reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
+ if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (count >= limit)
+ count = limit;
+ reg_end = reg_addr + (count / sizeof(u64));
+
+ /* not very efficient, but it works for now */
+ while (reg_addr < reg_end) {
+ u64 data;
+ if (copy_from_user(&data, uaddr, sizeof(data))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ writeq(data, reg_addr);
+
+ reg_addr++;
+ uaddr += sizeof(u64);
+ }
+ ret = 0;
+bail:
+ return ret;
+}
+
+/*
+ * qib_read_umem32 - read a 32-bit quantity from the chip into user space
+ * @dd: the qlogic_ib device
+ * @uaddr: the location to store the data in user memory
+ * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
+ * @count: number of bytes to copy
+ *
+ * read 32 bit values, not 64 bit; for memories that only
+ * support 32 bit reads; usually a single dword.
+ */
+static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr,
+ u32 regoffs, size_t count)
+{
+ const u32 __iomem *reg_addr;
+ const u32 __iomem *reg_end;
+ u32 limit;
+ int ret;
+
+ reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
+ if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (count >= limit)
+ count = limit;
+ reg_end = reg_addr + (count / sizeof(u32));
+
+ /* not very efficient, but it works for now */
+ while (reg_addr < reg_end) {
+ u32 data = readl(reg_addr);
+
+ if (copy_to_user(uaddr, &data, sizeof(data))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ reg_addr++;
+ uaddr += sizeof(u32);
+
+ }
+ ret = 0;
+bail:
+ return ret;
+}
+
+/*
+ * qib_write_umem32 - write a 32-bit quantity to the chip from user space
+ * @dd: the qlogic_ib device
+ * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
+ * @uaddr: the source of the data in user memory
+ * @count: number of bytes to copy
+ *
+ * write 32 bit values, not 64 bit; for memories that only
+ * support 32 bit write; usually a single dword.
+ */
+
+static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs,
+ const void __user *uaddr, size_t count)
+{
+ u32 __iomem *reg_addr;
+ const u32 __iomem *reg_end;
+ u32 limit;
+ int ret;
+
+ reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
+ if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (count >= limit)
+ count = limit;
+ reg_end = reg_addr + (count / sizeof(u32));
+
+ while (reg_addr < reg_end) {
+ u32 data;
+
+ if (copy_from_user(&data, uaddr, sizeof(data))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ writel(data, reg_addr);
+
+ reg_addr++;
+ uaddr += sizeof(u32);
+ }
+ ret = 0;
+bail:
+ return ret;
+}
+
+static int qib_diag_open(struct inode *in, struct file *fp)
+{
+ int unit = iminor(in) - QIB_DIAG_MINOR_BASE;
+ struct qib_devdata *dd;
+ struct qib_diag_client *dc;
+ int ret;
+
+ mutex_lock(&qib_mutex);
+
+ dd = qib_lookup(unit);
+
+ if (dd == NULL || !(dd->flags & QIB_PRESENT) ||
+ !dd->kregbase) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ dc = get_client(dd);
+ if (!dc) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ dc->next = dd->diag_client;
+ dd->diag_client = dc;
+ fp->private_data = dc;
+ ret = 0;
+bail:
+ mutex_unlock(&qib_mutex);
+
+ return ret;
+}
+
+/**
+ * qib_diagpkt_write - write an IB packet
+ * @fp: the diag data device file pointer
+ * @data: qib_diag_xpkt structure saying where to get the packet
+ * @count: size of data to write
+ * @off: unused by this code
+ */
+static ssize_t qib_diagpkt_write(struct file *fp,
+ const char __user *data,
+ size_t count, loff_t *off)
+{
+ u32 __iomem *piobuf;
+ u32 plen, clen, pbufn;
+ struct qib_diag_xpkt dp;
+ u32 *tmpbuf = NULL;
+ struct qib_devdata *dd;
+ struct qib_pportdata *ppd;
+ ssize_t ret = 0;
+
+ if (count != sizeof(dp)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (copy_from_user(&dp, data, sizeof(dp))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ dd = qib_lookup(dp.unit);
+ if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) {
+ ret = -ENODEV;
+ goto bail;
+ }
+ if (!(dd->flags & QIB_INITTED)) {
+ /* no hardware, freeze, etc. */
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ if (dp.version != _DIAG_XPKT_VERS) {
+ qib_dev_err(dd, "Invalid version %u for diagpkt_write\n",
+ dp.version);
+ ret = -EINVAL;
+ goto bail;
+ }
+ /* send count must be an exact number of dwords */
+ if (dp.len & 3) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (!dp.port || dp.port > dd->num_pports) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ ppd = &dd->pport[dp.port - 1];
+
+ /* need total length before first word written */
+ /* +1 word is for the qword padding */
+ plen = sizeof(u32) + dp.len;
+ clen = dp.len >> 2;
+
+ if ((plen + 4) > ppd->ibmaxlen) {
+ ret = -EINVAL;
+ goto bail; /* before writing pbc */
+ }
+ tmpbuf = vmalloc(plen);
+ if (!tmpbuf) {
+ qib_devinfo(dd->pcidev, "Unable to allocate tmp buffer, "
+ "failing\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ if (copy_from_user(tmpbuf,
+ (const void __user *) (unsigned long) dp.data,
+ dp.len)) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ plen >>= 2; /* in dwords */
+
+ if (dp.pbc_wd == 0)
+ dp.pbc_wd = plen;
+
+ piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn);
+ if (!piobuf) {
+ ret = -EBUSY;
+ goto bail;
+ }
+ /* disarm it just to be extra sure */
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn));
+
+ /* disable header check on pbufn for this packet */
+ dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL);
+
+ writeq(dp.pbc_wd, piobuf);
+ /*
+ * Copy all but the trigger word, then flush, so it's written
+ * to chip before trigger word, then write trigger word, then
+ * flush again, so packet is sent.
+ */
+ if (dd->flags & QIB_PIO_FLUSH_WC) {
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, tmpbuf, clen - 1);
+ qib_flush_wc();
+ __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
+ } else
+ qib_pio_copy(piobuf + 2, tmpbuf, clen);
+
+ if (dd->flags & QIB_USE_SPCL_TRIG) {
+ u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+
+ qib_flush_wc();
+ __raw_writel(0xaebecede, piobuf + spcl_off);
+ }
+
+ /*
+ * Ensure buffer is written to the chip, then re-enable
+ * header checks (if supported by chip). The txchk
+ * code will ensure seen by chip before returning.
+ */
+ qib_flush_wc();
+ qib_sendbuf_done(dd, pbufn);
+ dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
+
+ ret = sizeof(dp);
+
+bail:
+ vfree(tmpbuf);
+ return ret;
+}
+
+static int qib_diag_release(struct inode *in, struct file *fp)
+{
+ mutex_lock(&qib_mutex);
+ return_client(fp->private_data);
+ fp->private_data = NULL;
+ mutex_unlock(&qib_mutex);
+ return 0;
+}
+
+/*
+ * Chip-specific code calls to register its interest in
+ * a specific range.
+ */
+struct diag_observer_list_elt {
+ struct diag_observer_list_elt *next;
+ const struct diag_observer *op;
+};
+
+int qib_register_observer(struct qib_devdata *dd,
+ const struct diag_observer *op)
+{
+ struct diag_observer_list_elt *olp;
+ int ret = -EINVAL;
+
+ if (!dd || !op)
+ goto bail;
+ ret = -ENOMEM;
+ olp = vmalloc(sizeof *olp);
+ if (!olp) {
+ printk(KERN_ERR QIB_DRV_NAME ": vmalloc for observer failed\n");
+ goto bail;
+ }
+ if (olp) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+ olp->op = op;
+ olp->next = dd->diag_observer_list;
+ dd->diag_observer_list = olp;
+ spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+ ret = 0;
+ }
+bail:
+ return ret;
+}
+
+/* Remove all registered observers when device is closed */
+static void qib_unregister_observers(struct qib_devdata *dd)
+{
+ struct diag_observer_list_elt *olp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+ olp = dd->diag_observer_list;
+ while (olp) {
+ /* Pop one observer, let go of lock */
+ dd->diag_observer_list = olp->next;
+ spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+ vfree(olp);
+ /* try again. */
+ spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+ olp = dd->diag_observer_list;
+ }
+ spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+}
+
+/*
+ * Find the observer, if any, for the specified address. Initial implementation
+ * is a simple stack of observers. This must be called with the diag transaction
+ * lock held.
+ */
+static const struct diag_observer *diag_get_observer(struct qib_devdata *dd,
+ u32 addr)
+{
+ struct diag_observer_list_elt *olp;
+ const struct diag_observer *op = NULL;
+
+ olp = dd->diag_observer_list;
+ while (olp) {
+ op = olp->op;
+ if (addr >= op->bottom && addr <= op->top)
+ break;
+ olp = olp->next;
+ }
+ if (!olp)
+ op = NULL;
+
+ return op;
+}
+
+static ssize_t qib_diag_read(struct file *fp, char __user *data,
+ size_t count, loff_t *off)
+{
+ struct qib_diag_client *dc = fp->private_data;
+ struct qib_devdata *dd = dc->dd;
+ void __iomem *kreg_base;
+ ssize_t ret;
+
+ if (dc->pid != current->pid) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ kreg_base = dd->kregbase;
+
+ if (count == 0)
+ ret = 0;
+ else if ((count % 4) || (*off % 4))
+ /* address or length is not 32-bit aligned, hence invalid */
+ ret = -EINVAL;
+ else if (dc->state < READY && (*off || count != 8))
+ ret = -EINVAL; /* prevent cat /dev/qib_diag* */
+ else {
+ unsigned long flags;
+ u64 data64 = 0;
+ int use_32;
+ const struct diag_observer *op;
+
+ use_32 = (count % 8) || (*off % 8);
+ ret = -1;
+ spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+ /*
+ * Check for observer on this address range.
+ * We only support a single 32 or 64-bit read
+ * via observer, currently.
+ */
+ op = diag_get_observer(dd, *off);
+ if (op) {
+ u32 offset = *off;
+ ret = op->hook(dd, op, offset, &data64, 0, use_32);
+ }
+ /*
+ * We need to release lock before any copy_to_user(),
+ * whether implicit in qib_read_umem* or explicit below.
+ */
+ spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+ if (!op) {
+ if (use_32)
+ /*
+ * Address or length is not 64-bit aligned;
+ * do 32-bit rd
+ */
+ ret = qib_read_umem32(dd, data, (u32) *off,
+ count);
+ else
+ ret = qib_read_umem64(dd, data, (u32) *off,
+ count);
+ } else if (ret == count) {
+ /* Below finishes case where observer existed */
+ ret = copy_to_user(data, &data64, use_32 ?
+ sizeof(u32) : sizeof(u64));
+ if (ret)
+ ret = -EFAULT;
+ }
+ }
+
+ if (ret >= 0) {
+ *off += count;
+ ret = count;
+ if (dc->state == OPENED)
+ dc->state = INIT;
+ }
+bail:
+ return ret;
+}
+
+static ssize_t qib_diag_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off)
+{
+ struct qib_diag_client *dc = fp->private_data;
+ struct qib_devdata *dd = dc->dd;
+ void __iomem *kreg_base;
+ ssize_t ret;
+
+ if (dc->pid != current->pid) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ kreg_base = dd->kregbase;
+
+ if (count == 0)
+ ret = 0;
+ else if ((count % 4) || (*off % 4))
+ /* address or length is not 32-bit aligned, hence invalid */
+ ret = -EINVAL;
+ else if (dc->state < READY &&
+ ((*off || count != 8) || dc->state != INIT))
+ /* No writes except second-step of init seq */
+ ret = -EINVAL; /* before any other write allowed */
+ else {
+ unsigned long flags;
+ const struct diag_observer *op = NULL;
+ int use_32 = (count % 8) || (*off % 8);
+
+ /*
+ * Check for observer on this address range.
+ * We only support a single 32 or 64-bit write
+ * via observer, currently. This helps, because
+ * we would otherwise have to jump through hoops
+ * to make "diag transaction" meaningful when we
+ * cannot do a copy_from_user while holding the lock.
+ */
+ if (count == 4 || count == 8) {
+ u64 data64;
+ u32 offset = *off;
+ ret = copy_from_user(&data64, data, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+ op = diag_get_observer(dd, *off);
+ if (op)
+ ret = op->hook(dd, op, offset, &data64, ~0Ull,
+ use_32);
+ spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+ }
+
+ if (!op) {
+ if (use_32)
+ /*
+ * Address or length is not 64-bit aligned;
+ * do 32-bit write
+ */
+ ret = qib_write_umem32(dd, (u32) *off, data,
+ count);
+ else
+ ret = qib_write_umem64(dd, (u32) *off, data,
+ count);
+ }
+ }
+
+ if (ret >= 0) {
+ *off += count;
+ ret = count;
+ if (dc->state == INIT)
+ dc->state = READY; /* all read/write OK now */
+ }
+bail:
+ return ret;
+}
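
Taken together, qib_diag_open(), qib_diag_read() and qib_diag_write() implement the OPENED -> INIT -> READY handshake described at the top of the file: the first read must be exactly 8 bytes at offset 0, and the first write must put 8 bytes back at offset 0; only then are arbitrary aligned offsets accepted. A user-space sketch of that sequence (the device node path is an assumption for illustration; it depends on how the ipath_diag%d minor is exposed on a given system):

        #include <fcntl.h>
        #include <stdint.h>
        #include <unistd.h>

        static int qib_diag_handshake(const char *devnode)
        {
                uint64_t probe = 0;
                int fd = open(devnode, O_RDWR);

                if (fd < 0)
                        return -1;
                /* 8-byte read at offset 0: OPENED -> INIT */
                if (pread(fd, &probe, sizeof(probe), 0) != (ssize_t) sizeof(probe))
                        goto fail;
                /* Echo 8 bytes back at offset 0: INIT -> READY */
                if (pwrite(fd, &probe, sizeof(probe), 0) != (ssize_t) sizeof(probe))
                        goto fail;
                return fd;      /* now usable for arbitrary diag reads/writes */
        fail:
                close(fd);
                return -1;
        }
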
diff --git a/drivers/infiniband/hw/qib/qib_dma.c b/drivers/infiniband/hw/qib/qib_dma.c
new file mode 100644
index 000000000000..2920bb39a65b
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_dma.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2006, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+
+#include "qib_verbs.h"
+
+#define BAD_DMA_ADDRESS ((u64) 0)
+
+/*
+ * The following functions implement driver specific replacements
+ * for the ib_dma_*() functions.
+ *
+ * These functions return kernel virtual addresses instead of
+ * device bus addresses since the driver uses the CPU to copy
+ * data instead of using hardware DMA.
+ */
+
+static int qib_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+ return dma_addr == BAD_DMA_ADDRESS;
+}
+
+static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr,
+ size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ return (u64) cpu_addr;
+}
+
+static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+}
+
+static u64 qib_dma_map_page(struct ib_device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ u64 addr;
+
+ BUG_ON(!valid_dma_direction(direction));
+
+ if (offset + size > PAGE_SIZE) {
+ addr = BAD_DMA_ADDRESS;
+ goto done;
+ }
+
+ addr = (u64) page_address(page);
+ if (addr)
+ addr += offset;
+ /* TODO: handle highmem pages */
+
+done:
+ return addr;
+}
+
+static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+}
+
+static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ u64 addr;
+ int i;
+ int ret = nents;
+
+ BUG_ON(!valid_dma_direction(direction));
+
+ for_each_sg(sgl, sg, nents, i) {
+ addr = (u64) page_address(sg_page(sg));
+ /* TODO: handle highmem pages */
+ if (!addr) {
+ ret = 0;
+ break;
+ }
+ }
+ return ret;
+}
+
+static void qib_unmap_sg(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+}
+
+static u64 qib_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
+{
+ u64 addr = (u64) page_address(sg_page(sg));
+
+ if (addr)
+ addr += sg->offset;
+ return addr;
+}
+
+static unsigned int qib_sg_dma_len(struct ib_device *dev,
+ struct scatterlist *sg)
+{
+ return sg->length;
+}
+
+static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static void qib_sync_single_for_device(struct ib_device *dev, u64 addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+}
+
+static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size,
+ u64 *dma_handle, gfp_t flag)
+{
+ struct page *p;
+ void *addr = NULL;
+
+ p = alloc_pages(flag, get_order(size));
+ if (p)
+ addr = page_address(p);
+ if (dma_handle)
+ *dma_handle = (u64) addr;
+ return addr;
+}
+
+static void qib_dma_free_coherent(struct ib_device *dev, size_t size,
+ void *cpu_addr, u64 dma_handle)
+{
+ free_pages((unsigned long) cpu_addr, get_order(size));
+}
+
+struct ib_dma_mapping_ops qib_dma_mapping_ops = {
+ .mapping_error = qib_mapping_error,
+ .map_single = qib_dma_map_single,
+ .unmap_single = qib_dma_unmap_single,
+ .map_page = qib_dma_map_page,
+ .unmap_page = qib_dma_unmap_page,
+ .map_sg = qib_map_sg,
+ .unmap_sg = qib_unmap_sg,
+ .dma_address = qib_sg_dma_address,
+ .dma_len = qib_sg_dma_len,
+ .sync_single_for_cpu = qib_sync_single_for_cpu,
+ .sync_single_for_device = qib_sync_single_for_device,
+ .alloc_coherent = qib_dma_alloc_coherent,
+ .free_coherent = qib_dma_free_coherent
+};
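
These callbacks only take effect once the verbs registration path points the ib_device's dma_ops at this table. A rough sketch of how it is wired up and dispatched follows (the field and wrapper names paraphrase the generic ib_verbs conventions of this kernel era and are not quoted from this patch):

        /* Installed during device registration, roughly:      */
        /*         ibdev->dma_ops = &qib_dma_mapping_ops;      */

        /* The generic ib_dma_*() wrappers then dispatch along these lines: */
        static inline u64 sketch_ib_dma_map_single(struct ib_device *dev,
                                                   void *cpu_addr, size_t size,
                                                   enum dma_data_direction dir)
        {
                if (dev->dma_ops)
                        return dev->dma_ops->map_single(dev, cpu_addr, size, dir);
                return dma_map_single(dev->dma_device, cpu_addr, size, dir);
        }

Because map_single here simply returns the kernel virtual address, no IOMMU or bounce buffering is ever involved; the driver later copies the payload into PIO buffers with the CPU.
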
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
new file mode 100644
index 000000000000..f15ce076ac49
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -0,0 +1,665 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+/*
+ * The size has to be longer than this string, so we can append
+ * board/chip information to it in the init code.
+ */
+const char ib_qib_version[] = QIB_IDSTR "\n";
+
+DEFINE_SPINLOCK(qib_devs_lock);
+LIST_HEAD(qib_dev_list);
+DEFINE_MUTEX(qib_mutex); /* general driver use */
+
+unsigned qib_ibmtu;
+module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
+MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");
+
+unsigned qib_compat_ddr_negotiate = 1;
+module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
+ S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(compat_ddr_negotiate,
+ "Attempt pre-IBTA 1.2 DDR speed negotiation");
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("QLogic <support@qlogic.com>");
+MODULE_DESCRIPTION("QLogic IB driver");
+
+/*
+ * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
+ * PIO send buffers. This is well beyond anything currently
+ * defined in the InfiniBand spec.
+ */
+#define QIB_PIO_MAXIBHDR 128
+
+struct qlogic_ib_stats qib_stats;
+
+const char *qib_get_unit_name(int unit)
+{
+ static char iname[16];
+
+ snprintf(iname, sizeof iname, "infinipath%u", unit);
+ return iname;
+}
+
+/*
+ * Return count of units with at least one port ACTIVE.
+ */
+int qib_count_active_units(void)
+{
+ struct qib_devdata *dd;
+ struct qib_pportdata *ppd;
+ unsigned long flags;
+ int pidx, nunits_active = 0;
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+ list_for_each_entry(dd, &qib_dev_list, list) {
+ if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
+ continue;
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
+ QIBL_LINKARMED | QIBL_LINKACTIVE))) {
+ nunits_active++;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+ return nunits_active;
+}
+
+/*
+ * Return count of all units, optionally return in arguments
+ * the number of usable (present) units, and the number of
+ * ports that are up.
+ */
+int qib_count_units(int *npresentp, int *nupp)
+{
+ int nunits = 0, npresent = 0, nup = 0;
+ struct qib_devdata *dd;
+ unsigned long flags;
+ int pidx;
+ struct qib_pportdata *ppd;
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+
+ list_for_each_entry(dd, &qib_dev_list, list) {
+ nunits++;
+ if ((dd->flags & QIB_PRESENT) && dd->kregbase)
+ npresent++;
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
+ QIBL_LINKARMED | QIBL_LINKACTIVE)))
+ nup++;
+ }
+ }
+
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+ if (npresentp)
+ *npresentp = npresent;
+ if (nupp)
+ *nupp = nup;
+
+ return nunits;
+}
+
+/**
+ * qib_wait_linkstate - wait for an IB link state change to occur
+ * @ppd: the qlogic_ib per-port data
+ * @state: the state to wait for
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for an IB link state change to occur.
+ * For now, take the easy polling route. Currently used only by
+ * qib_set_linkstate. Returns 0 if the state is reached, otherwise
+ * -ETIMEDOUT. state can have multiple state bits set, to match any of
+ * several possible transitions.
+ */
+int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ if (ppd->state_wanted) {
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ ret = -EBUSY;
+ goto bail;
+ }
+ ppd->state_wanted = state;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ wait_event_interruptible_timeout(ppd->state_wait,
+ (ppd->lflags & state),
+ msecs_to_jiffies(msecs));
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->state_wanted = 0;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+ if (!(ppd->lflags & state))
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
+bail:
+ return ret;
+}
+
+int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
+{
+ u32 lstate;
+ int ret;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ switch (newstate) {
+ case QIB_IB_LINKDOWN_ONLY:
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case QIB_IB_LINKDOWN:
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case QIB_IB_LINKDOWN_SLEEP:
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case QIB_IB_LINKDOWN_DISABLE:
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case QIB_IB_LINKARM:
+ if (ppd->lflags & QIBL_LINKARMED) {
+ ret = 0;
+ goto bail;
+ }
+ if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ /*
+ * Since the port can be ACTIVE when we ask for ARMED,
+ * clear QIBL_LINKV so we can wait for a transition.
+ * If the link isn't ARMED, then something else happened
+ * and there is no point waiting for ARMED.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_LINKV;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
+ lstate = QIBL_LINKV;
+ break;
+
+ case QIB_IB_LINKACTIVE:
+ if (ppd->lflags & QIBL_LINKACTIVE) {
+ ret = 0;
+ goto bail;
+ }
+ if (!(ppd->lflags & QIBL_LINKARMED)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
+ lstate = QIBL_LINKACTIVE;
+ break;
+
+ default:
+ ret = -EINVAL;
+ goto bail;
+ }
+ ret = qib_wait_linkstate(ppd, lstate, 10);
+
+bail:
+ return ret;
+}
+
+/*
+ * Get address of an eager buffer from its index (allocated in chunks, not
+ * contiguous).
+ */
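+/*
+ * For illustration (values hypothetical): with rcvegrbufs_perchunk == 16
+ * and rcvegrbufsize == 2048, eager index etail == 37 resolves to chunk
+ * 37 / 16 == 2 and entry 37 % 16 == 5, i.e. an offset of 5 * 2048 bytes
+ * into rcvegrbuf[2].
+ */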
+static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
+{
+ const u32 chunk = etail / rcd->rcvegrbufs_perchunk;
+ const u32 idx = etail % rcd->rcvegrbufs_perchunk;
+
+ return rcd->rcvegrbuf[chunk] + idx * rcd->dd->rcvegrbufsize;
+}
+
+/*
+ * Returns 1 if error was a CRC, else 0.
+ * Needed for some chips' synthesized error counters.
+ */
+static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
+ u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
+ struct qib_message_header *hdr)
+{
+ u32 ret = 0;
+
+ if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
+ ret = 1;
+ return ret;
+}
+
+/*
+ * qib_kreceive - receive a packet
+ * @rcd: the qlogic_ib context
+ * @llic: gets count of good packets needed to clear lli,
+ * (used with chips that need to track crcs for lli)
+ *
+ * called from interrupt handler for errors or receive interrupt
+ * Returns number of CRC error packets, needed by some chips for
+ * local link integrity tracking. crcs are adjusted down by following
+ * good packets, if any, and count of good packets is also tracked.
+ */
+u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
+{
+ struct qib_devdata *dd = rcd->dd;
+ struct qib_pportdata *ppd = rcd->ppd;
+ __le32 *rhf_addr;
+ void *ebuf;
+ const u32 rsize = dd->rcvhdrentsize; /* words */
+ const u32 maxcnt = dd->rcvhdrcnt * rsize; /* words */
+ u32 etail = -1, l, hdrqtail;
+ struct qib_message_header *hdr;
+ u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
+ int last;
+ u64 lval;
+ struct qib_qp *qp, *nqp;
+
+ l = rcd->head;
+ rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
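+ /*
+ * When QIB_NODMA_RTAIL is set, no DMA'ed tail pointer is consulted;
+ * instead each receive header flag word carries a sequence number
+ * that cycles through 1..13, and a value that does not match the
+ * expected seq_cnt means there is no new packet to process.
+ */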
+ if (dd->flags & QIB_NODMA_RTAIL) {
+ u32 seq = qib_hdrget_seq(rhf_addr);
+ if (seq != rcd->seq_cnt)
+ goto bail;
+ hdrqtail = 0;
+ } else {
+ hdrqtail = qib_get_rcvhdrtail(rcd);
+ if (l == hdrqtail)
+ goto bail;
+ smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
+ }
+
+ for (last = 0, i = 1; !last; i += !last) {
+ hdr = dd->f_get_msgheader(dd, rhf_addr);
+ eflags = qib_hdrget_err_flags(rhf_addr);
+ etype = qib_hdrget_rcv_type(rhf_addr);
+ /* total length */
+ tlen = qib_hdrget_length_in_bytes(rhf_addr);
+ ebuf = NULL;
+ if ((dd->flags & QIB_NODMA_RTAIL) ?
+ qib_hdrget_use_egr_buf(rhf_addr) :
+ (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
+ etail = qib_hdrget_index(rhf_addr);
+ updegr = 1;
+ if (tlen > sizeof(*hdr) ||
+ etype >= RCVHQ_RCV_TYPE_NON_KD)
+ ebuf = qib_get_egrbuf(rcd, etail);
+ }
+ if (!eflags) {
+ u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;
+
+ if (lrh_len != tlen) {
+ qib_stats.sps_lenerrs++;
+ goto move_along;
+ }
+ }
+ if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
+ ebuf == NULL &&
+ tlen > (dd->rcvhdrentsize - 2 + 1 -
+ qib_hdrget_offset(rhf_addr)) << 2) {
+ goto move_along;
+ }
+
+ /*
+ * Both tiderr and qibhdrerr are set for all plain IB
+ * packets; only qibhdrerr should be set.
+ */
+ if (unlikely(eflags))
+ crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
+ etail, rhf_addr, hdr);
+ else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
+ qib_ib_rcv(rcd, hdr, ebuf, tlen);
+ if (crcs)
+ crcs--;
+ else if (llic && *llic)
+ --*llic;
+ }
+move_along:
+ l += rsize;
+ if (l >= maxcnt)
+ l = 0;
+ rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
+ if (dd->flags & QIB_NODMA_RTAIL) {
+ u32 seq = qib_hdrget_seq(rhf_addr);
+
+ if (++rcd->seq_cnt > 13)
+ rcd->seq_cnt = 1;
+ if (seq != rcd->seq_cnt)
+ last = 1;
+ } else if (l == hdrqtail)
+ last = 1;
+ /*
+ * Update head regs etc., every 16 packets, if not last pkt,
+ * to help prevent rcvhdrq overflows, when many packets
+ * are processed and queue is nearly full.
+ * Don't request an interrupt for intermediate updates.
+ */
+ lval = l;
+ if (!last && !(i & 0xf)) {
+ dd->f_update_usrhead(rcd, lval, updegr, etail);
+ updegr = 0;
+ }
+ }
+
+ rcd->head = l;
+ rcd->pkt_count += i;
+
+ /*
+ * Iterate over all QPs waiting to respond.
+ * The list won't change since the IRQ is only run on one CPU.
+ */
+ list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
+ list_del_init(&qp->rspwait);
+ if (qp->r_flags & QIB_R_RSP_NAK) {
+ qp->r_flags &= ~QIB_R_RSP_NAK;
+ qib_send_rc_ack(qp);
+ }
+ if (qp->r_flags & QIB_R_RSP_SEND) {
+ unsigned long flags;
+
+ qp->r_flags &= ~QIB_R_RSP_SEND;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_qib_state_ops[qp->state] &
+ QIB_PROCESS_OR_FLUSH_SEND)
+ qib_schedule_send(qp);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ }
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+
+bail:
+ /* Report number of packets consumed */
+ if (npkts)
+ *npkts = i;
+
+ /*
+ * Always write head at end, and setup rcv interrupt, even
+ * if no packets were processed.
+ */
+ lval = (u64)rcd->head | dd->rhdrhead_intr_off;
+ dd->f_update_usrhead(rcd, lval, updegr, etail);
+ return crcs;
+}
+
+/**
+ * qib_set_mtu - set the MTU
+ * @ppd: the perport data
+ * @arg: the new MTU
+ *
+ * We can handle "any" incoming size; the issue here is whether we
+ * need to restrict our outgoing size. For now, we don't do any
+ * sanity checking on this, and we don't deal with what happens to
+ * programs that are already running when the size changes.
+ * NOTE: changing the MTU will usually cause the IBC to go back to
+ * link INIT state...
+ */
+int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
+{
+ u32 piosize;
+ int ret, chk;
+
+ if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
+ arg != 4096) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ chk = ib_mtu_enum_to_int(qib_ibmtu);
+ if (chk > 0 && arg > chk) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ piosize = ppd->ibmaxlen;
+ ppd->ibmtu = arg;
+
+ if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
+ /* Only if it's not the initial value (or reset to it) */
+ if (piosize != ppd->init_ibmaxlen) {
+ if (arg > piosize && arg <= ppd->init_ibmaxlen)
+ piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
+ ppd->ibmaxlen = piosize;
+ }
+ } else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
+ piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
+ ppd->ibmaxlen = piosize;
+ }
+
+ ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
+{
+ struct qib_devdata *dd = ppd->dd;
+ ppd->lid = lid;
+ ppd->lmc = lmc;
+
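+ /*
+ * The value passed to the chip packs the LID into the low 16 bits
+ * and a mask with the low lmc bits cleared into the upper 16 bits;
+ * e.g. with lmc == 2 the expression below evaluates to
+ * lid | 0xFFFC0000.
+ */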
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
+ lid | (~((1U << lmc) - 1)) << 16);
+
+ qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
+ dd->unit, ppd->port, lid);
+
+ return 0;
+}
+
+/*
+ * The following deals with the "obviously simple" task of overriding the
+ * state of the LEDs, which normally indicate link physical and logical
+ * status. The complications arise in dealing with different hardware
+ * mappings, the board-dependent routine being called from interrupts,
+ * and then there's the requirement to _flash_ them.
+ */
+#define LED_OVER_FREQ_SHIFT 8
+#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
+/* Below is "non-zero" to force override, but both actual LEDs are off */
+#define LED_OVER_BOTH_OFF (8)
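+
+/*
+ * As decoded by qib_set_led_override() below, val packs the two blink
+ * phases into its low two nybbles (phase 0 in bits 0-3, phase 1 in bits
+ * 4-7) and the blink frequency into bits 8-15; each phase then lasts
+ * (HZ << 4) / freq jiffies, so e.g. freq == 16 gives roughly one second
+ * per phase. freq == 0 means no blinking: both phases come from the low
+ * nybble alone, polled at 1HZ.
+ */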
+
+static void qib_run_led_override(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_devdata *dd = ppd->dd;
+ int timeoff;
+ int ph_idx;
+
+ if (!(dd->flags & QIB_INITTED))
+ return;
+
+ ph_idx = ppd->led_override_phase++ & 1;
+ ppd->led_override = ppd->led_override_vals[ph_idx];
+ timeoff = ppd->led_override_timeoff;
+
+ dd->f_setextled(ppd, 1);
+ /*
+ * don't re-fire the timer if user asked for it to be off; we let
+ * it fire one more time after they turn it off to simplify
+ */
+ if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
+ mod_timer(&ppd->led_override_timer, jiffies + timeoff);
+}
+
+void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int timeoff, freq;
+
+ if (!(dd->flags & QIB_INITTED))
+ return;
+
+ /* First check if we are blinking. If not, use 1HZ polling */
+ timeoff = HZ;
+ freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
+
+ if (freq) {
+ /* For blink, set each phase from one nybble of val */
+ ppd->led_override_vals[0] = val & 0xF;
+ ppd->led_override_vals[1] = (val >> 4) & 0xF;
+ timeoff = (HZ << 4)/freq;
+ } else {
+ /* Non-blink set both phases the same. */
+ ppd->led_override_vals[0] = val & 0xF;
+ ppd->led_override_vals[1] = val & 0xF;
+ }
+ ppd->led_override_timeoff = timeoff;
+
+ /*
+ * If the timer has not already been started, do so. Use a "quick"
+ * timeout so the function will be called soon, to look at our request.
+ */
+ if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
+ /* Need to start timer */
+ init_timer(&ppd->led_override_timer);
+ ppd->led_override_timer.function = qib_run_led_override;
+ ppd->led_override_timer.data = (unsigned long) ppd;
+ ppd->led_override_timer.expires = jiffies + 1;
+ add_timer(&ppd->led_override_timer);
+ } else {
+ if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
+ mod_timer(&ppd->led_override_timer, jiffies + 1);
+ atomic_dec(&ppd->led_override_timer_active);
+ }
+}
+
+/**
+ * qib_reset_device - reset the chip if possible
+ * @unit: the device to reset
+ *
+ * Whether or not reset is successful, we attempt to re-initialize the chip
+ * (that is, much like a driver unload/reload). We clear the INITTED flag
+ * so that the various entry points will fail until we reinitialize. For
+ * now, we only allow this if no user contexts are open that use chip resources
+ */
+int qib_reset_device(int unit)
+{
+ int ret, i;
+ struct qib_devdata *dd = qib_lookup(unit);
+ struct qib_pportdata *ppd;
+ unsigned long flags;
+ int pidx;
+
+ if (!dd) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);
+
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
+ qib_devinfo(dd->pcidev, "Invalid unit number %u or "
+ "not initialized or not present\n", unit);
+ ret = -ENXIO;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ if (dd->rcd)
+ for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
+ if (!dd->rcd[i] || !dd->rcd[i]->cnt)
+ continue;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ ret = -EBUSY;
+ goto bail;
+ }
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (atomic_read(&ppd->led_override_timer_active)) {
+ /* Need to stop LED timer, _then_ shut off LEDs */
+ del_timer_sync(&ppd->led_override_timer);
+ atomic_set(&ppd->led_override_timer_active, 0);
+ }
+
+ /* Shut off LEDs after we are sure timer is not running */
+ ppd->led_override = LED_OVER_BOTH_OFF;
+ dd->f_setextled(ppd, 0);
+ if (dd->flags & QIB_HAS_SEND_DMA)
+ qib_teardown_sdma(ppd);
+ }
+
+ ret = dd->f_reset(dd);
+ if (ret == 1)
+ ret = qib_init(dd, 1);
+ else
+ ret = -EAGAIN;
+ if (ret)
+ qib_dev_err(dd, "Reinitialize unit %u after "
+ "reset failed with %d\n", unit, ret);
+ else
+ qib_devinfo(dd->pcidev, "Reinitialized unit %u after "
+ "resetting\n", unit);
+
+bail:
+ return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
new file mode 100644
index 000000000000..92d9cfe98a68
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+/*
+ * Functions specific to the serial EEPROM on cards handled by ib_qib.
+ * The actual serial interface code is in qib_twsi.c. This file is a client.
+ */
+
+/**
+ * qib_eeprom_read - receives bytes from the eeprom via I2C
+ * @dd: the qlogic_ib device
+ * @eeprom_offset: address to read from
+ * @buff: where to store the result
+ * @len: number of bytes to receive
+ */
+int qib_eeprom_read(struct qib_devdata *dd, u8 eeprom_offset,
+ void *buff, int len)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (!ret) {
+ ret = qib_twsi_reset(dd);
+ if (ret)
+ qib_dev_err(dd, "EEPROM Reset for read failed\n");
+ else
+ ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev,
+ eeprom_offset, buff, len);
+ mutex_unlock(&dd->eep_lock);
+ }
+
+ return ret;
+}
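+
+/*
+ * Usage sketch (the 16-byte length is only illustrative):
+ *
+ * u8 buf[16];
+ *
+ * if (qib_eeprom_read(dd, 0, buf, sizeof(buf)))
+ * qib_dev_err(dd, "EEPROM read failed\n");
+ */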
+
+/*
+ * Actually update the eeprom, first doing write enable if
+ * needed, then restoring write enable state.
+ * Must be called with eep_lock held
+ */
+static int eeprom_write_with_enable(struct qib_devdata *dd, u8 offset,
+ const void *buf, int len)
+{
+ int ret, pwen;
+
+ pwen = dd->f_eeprom_wen(dd, 1);
+ ret = qib_twsi_reset(dd);
+ if (ret)
+ qib_dev_err(dd, "EEPROM Reset for write failed\n");
+ else
+ ret = qib_twsi_blk_wr(dd, dd->twsi_eeprom_dev,
+ offset, buf, len);
+ dd->f_eeprom_wen(dd, pwen);
+ return ret;
+}
+
+/**
+ * qib_eeprom_write - writes data to the eeprom via I2C
+ * @dd: the qlogic_ib device
+ * @eeprom_offset: where to place data
+ * @buff: data to write
+ * @len: number of bytes to write
+ */
+int qib_eeprom_write(struct qib_devdata *dd, u8 eeprom_offset,
+ const void *buff, int len)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (!ret) {
+ ret = eeprom_write_with_enable(dd, eeprom_offset, buff, len);
+ mutex_unlock(&dd->eep_lock);
+ }
+
+ return ret;
+}
+
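+/*
+ * The stored checksum is the bitwise complement of the byte sum of the
+ * structure (up to if_length, capped at the struct size). Because the
+ * byte loop below normally includes if_csum itself, that byte is
+ * subtracted back out before complementing; passing adjust != 0 writes
+ * the freshly computed value back into if_csum.
+ */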
+static u8 flash_csum(struct qib_flash *ifp, int adjust)
+{
+ u8 *ip = (u8 *) ifp;
+ u8 csum = 0, len;
+
+ /*
+ * Limit length checksummed to max length of actual data.
+ * Checksum of erased eeprom will still be bad, but we avoid
+ * reading past the end of the buffer we were passed.
+ */
+ len = ifp->if_length;
+ if (len > sizeof(struct qib_flash))
+ len = sizeof(struct qib_flash);
+ while (len--)
+ csum += *ip++;
+ csum -= ifp->if_csum;
+ csum = ~csum;
+ if (adjust)
+ ifp->if_csum = csum;
+
+ return csum;
+}
+
+/**
+ * qib_get_eeprom_info - get the GUID et al. from the TWSI EEPROM device
+ * @dd: the qlogic_ib device
+ *
+ * We have the capability to use the nguid field, and get
+ * the guid from the first chip's flash, to use for all of them.
+ */
+void qib_get_eeprom_info(struct qib_devdata *dd)
+{
+ void *buf;
+ struct qib_flash *ifp;
+ __be64 guid;
+ int len, eep_stat;
+ u8 csum, *bguid;
+ int t = dd->unit;
+ struct qib_devdata *dd0 = qib_lookup(0);
+
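+ /*
+ * If unit 0's flash supplied a base GUID meant to cover several
+ * boards (nguid > 1), derive this unit's GUID from it by adding the
+ * unit number to the last octet, carrying into octets 6 and 5 as
+ * needed, and giving up rather than letting the carry wrap into the
+ * OUI.
+ */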
+ if (t && dd0->nguid > 1 && t <= dd0->nguid) {
+ u8 oguid;
+ dd->base_guid = dd0->base_guid;
+ bguid = (u8 *) &dd->base_guid;
+
+ oguid = bguid[7];
+ bguid[7] += t;
+ if (oguid > bguid[7]) {
+ if (bguid[6] == 0xff) {
+ if (bguid[5] == 0xff) {
+ qib_dev_err(dd, "Can't set %s GUID"
+ " from base, wraps to"
+ " OUI!\n",
+ qib_get_unit_name(t));
+ dd->base_guid = 0;
+ goto bail;
+ }
+ bguid[5]++;
+ }
+ bguid[6]++;
+ }
+ dd->nguid = 1;
+ goto bail;
+ }
+
+ /*
+ * Read full flash, not just currently used part, since it may have
+ * been written with a newer definition.
+ */
+ len = sizeof(struct qib_flash);
+ buf = vmalloc(len);
+ if (!buf) {
+ qib_dev_err(dd, "Couldn't allocate memory to read %u "
+ "bytes from eeprom for GUID\n", len);
+ goto bail;
+ }
+
+ /*
+ * Use "public" eeprom read function, which does locking and
+ * figures out device. This will migrate to chip-specific.
+ */
+ eep_stat = qib_eeprom_read(dd, 0, buf, len);
+
+ if (eep_stat) {
+ qib_dev_err(dd, "Failed reading GUID from eeprom\n");
+ goto done;
+ }
+ ifp = (struct qib_flash *)buf;
+
+ csum = flash_csum(ifp, 0);
+ if (csum != ifp->if_csum) {
+ qib_devinfo(dd->pcidev, "Bad I2C flash checksum: "
+ "0x%x, not 0x%x\n", csum, ifp->if_csum);
+ goto done;
+ }
+ if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
+ *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
+ qib_dev_err(dd, "Invalid GUID %llx from flash; ignoring\n",
+ *(unsigned long long *) ifp->if_guid);
+ /* don't allow GUID if all 0 or all 1's */
+ goto done;
+ }
+
+ /* complain, but allow it */
+ if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
+ qib_devinfo(dd->pcidev, "Warning, GUID %llx is "
+ "default, probably not correct!\n",
+ *(unsigned long long *) ifp->if_guid);
+
+ bguid = ifp->if_guid;
+ if (!bguid[0] && !bguid[1] && !bguid[2]) {
+ /*
+ * Original incorrect GUID format in flash; fix in
+ * core copy, by shifting up 2 octets; don't need to
+ * change top octet, since both it and shifted are 0.
+ */
+ bguid[1] = bguid[3];
+ bguid[2] = bguid[4];
+ bguid[3] = 0;
+ bguid[4] = 0;
+ guid = *(__be64 *) ifp->if_guid;
+ } else
+ guid = *(__be64 *) ifp->if_guid;
+ dd->base_guid = guid;
+ dd->nguid = ifp->if_numguid;
+ /*
+ * Things are slightly complicated by the desire to transparently
+ * support both the Pathscale 10-digit serial number and the QLogic
+ * 13-character version.
+ */
+ if ((ifp->if_fversion > 1) && ifp->if_sprefix[0] &&
+ ((u8 *) ifp->if_sprefix)[0] != 0xFF) {
+ char *snp = dd->serial;
+
+ /*
+ * This board has a Serial-prefix, which is stored
+ * elsewhere for backward-compatibility.
+ */
+ memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
+ snp[sizeof ifp->if_sprefix] = '\0';
+ len = strlen(snp);
+ snp += len;
+ len = (sizeof dd->serial) - len;
+ if (len > sizeof ifp->if_serial)
+ len = sizeof ifp->if_serial;
+ memcpy(snp, ifp->if_serial, len);
+ } else
+ memcpy(dd->serial, ifp->if_serial,
+ sizeof ifp->if_serial);
+ if (!strstr(ifp->if_comment, "Tested successfully"))
+ qib_dev_err(dd, "Board SN %s did not pass functional "
+ "test: %s\n", dd->serial, ifp->if_comment);
+
+ memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
+ /*
+ * Power-on (actually "active") hours are kept as little-endian value
+ * in EEPROM, but as seconds in a (possibly as small as 24-bit)
+ * atomic_t while running.
+ */
+ atomic_set(&dd->active_time, 0);
+ dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
+
+done:
+ vfree(buf);
+
+bail:;
+}
+
+/**
+ * qib_update_eeprom_log - copy active-time and error counters to eeprom
+ * @dd: the qlogic_ib device
+ *
+ * Although the time is kept as seconds in the qib_devdata struct, it is
+ * rounded to hours for re-write, as we have only 16 bits in EEPROM.
+ * First-cut code reads whole (expected) struct qib_flash, modifies,
+ * re-writes. Future direction: read/write only what we need, assuming
+ * that the EEPROM had to have been "good enough" for driver init, and
+ * if not, we aren't making it worse.
+ *
+ */
+int qib_update_eeprom_log(struct qib_devdata *dd)
+{
+ void *buf;
+ struct qib_flash *ifp;
+ int len, hi_water;
+ uint32_t new_time, new_hrs;
+ u8 csum;
+ int ret, idx;
+ unsigned long flags;
+
+ /* first, check if we actually need to do anything. */
+ ret = 0;
+ for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+ if (dd->eep_st_new_errs[idx]) {
+ ret = 1;
+ break;
+ }
+ }
+ new_time = atomic_read(&dd->active_time);
+
+ if (ret == 0 && new_time < 3600)
+ goto bail;
+
+ /*
+ * The quick-check above determined that there is something worthy
+ * of logging, so get the current contents and take a more detailed look.
+ * read full flash, not just currently used part, since it may have
+ * been written with a newer definition
+ */
+ len = sizeof(struct qib_flash);
+ buf = vmalloc(len);
+ ret = 1;
+ if (!buf) {
+ qib_dev_err(dd, "Couldn't allocate memory to read %u "
+ "bytes from eeprom for logging\n", len);
+ goto bail;
+ }
+
+ /* Grab semaphore and read current EEPROM. If we get an
+ * error, let go, but if not, keep it until we finish write.
+ */
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (ret) {
+ qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
+ goto free_bail;
+ }
+ ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
+ if (ret) {
+ mutex_unlock(&dd->eep_lock);
+ qib_dev_err(dd, "Unable read EEPROM for logging\n");
+ goto free_bail;
+ }
+ ifp = (struct qib_flash *)buf;
+
+ csum = flash_csum(ifp, 0);
+ if (csum != ifp->if_csum) {
+ mutex_unlock(&dd->eep_lock);
+ qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
+ csum, ifp->if_csum);
+ ret = 1;
+ goto free_bail;
+ }
+ hi_water = 0;
+ spin_lock_irqsave(&dd->eep_st_lock, flags);
+ for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+ int new_val = dd->eep_st_new_errs[idx];
+ if (new_val) {
+ /*
+ * If we have seen any errors, add to EEPROM values
+ * We need to saturate at 0xFF (255) and we also
+ * would need to adjust the checksum if we were
+ * trying to minimize EEPROM traffic
+ * Note that we add to actual current count in EEPROM,
+ * in case it was altered while we were running.
+ */
+ new_val += ifp->if_errcntp[idx];
+ if (new_val > 0xFF)
+ new_val = 0xFF;
+ if (ifp->if_errcntp[idx] != new_val) {
+ ifp->if_errcntp[idx] = new_val;
+ hi_water = offsetof(struct qib_flash,
+ if_errcntp) + idx;
+ }
+ /*
+ * update our shadow (used to minimize EEPROM
+ * traffic), to match what we are about to write.
+ */
+ dd->eep_st_errs[idx] = new_val;
+ dd->eep_st_new_errs[idx] = 0;
+ }
+ }
+ /*
+ * Now update active-time. We would like to round to the nearest hour
+ * but unless atomic_t are sure to be proper signed ints we cannot,
+ * because we need to account for what we "transfer" to EEPROM and
+ * if we log an hour at 31 minutes, then we would need to set
+ * active_time to -29 to accurately count the _next_ hour.
+ */
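+ /*
+ * Worked example (numbers illustrative): with 7500 accumulated
+ * seconds, new_hrs == 2, 7200 seconds are subtracted from
+ * active_time so the remaining 300 carry forward, and the hour total
+ * (saturated at 0xFFFF) is written back as two little-endian bytes
+ * in if_powerhour[].
+ */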
+ if (new_time >= 3600) {
+ new_hrs = new_time / 3600;
+ atomic_sub((new_hrs * 3600), &dd->active_time);
+ new_hrs += dd->eep_hrs;
+ if (new_hrs > 0xFFFF)
+ new_hrs = 0xFFFF;
+ dd->eep_hrs = new_hrs;
+ if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
+ ifp->if_powerhour[0] = new_hrs & 0xFF;
+ hi_water = offsetof(struct qib_flash, if_powerhour);
+ }
+ if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
+ ifp->if_powerhour[1] = new_hrs >> 8;
+ hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
+ }
+ }
+ /*
+ * There is a tiny possibility that we could somehow fail to write
+ * the EEPROM after updating our shadows, but problems from holding
+ * the spinlock too long are a much bigger issue.
+ */
+ spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+ if (hi_water) {
+ /* we made some change to the data, update cksum and write */
+ csum = flash_csum(ifp, 1);
+ ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
+ }
+ mutex_unlock(&dd->eep_lock);
+ if (ret)
+ qib_dev_err(dd, "Failed updating EEPROM\n");
+
+free_bail:
+ vfree(buf);
+bail:
+ return ret;
+}
+
+/**
+ * qib_inc_eeprom_err - increment one of the four error counters
+ * that are logged to EEPROM.
+ * @dd: the qlogic_ib device
+ * @eidx: 0..3, the counter to increment
+ * @incr: how much to add
+ *
+ * Each counter is 8-bits, and saturates at 255 (0xFF). They
+ * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
+ * is called, but it can only be called in a context that allows sleep.
+ * This function can be called even at interrupt level.
+ */
+void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
+{
+ uint new_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->eep_st_lock, flags);
+ new_val = dd->eep_st_new_errs[eidx] + incr;
+ if (new_val > 255)
+ new_val = 255;
+ dd->eep_st_new_errs[eidx] = new_val;
+ spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
new file mode 100644
index 000000000000..a142a9eb5226
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -0,0 +1,2317 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/cdev.h>
+#include <linux/swap.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+#include <linux/jiffies.h>
+#include <asm/pgtable.h>
+#include <linux/delay.h>
+
+#include "qib.h"
+#include "qib_common.h"
+#include "qib_user_sdma.h"
+
+static int qib_open(struct inode *, struct file *);
+static int qib_close(struct inode *, struct file *);
+static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
+static ssize_t qib_aio_write(struct kiocb *, const struct iovec *,
+ unsigned long, loff_t);
+static unsigned int qib_poll(struct file *, struct poll_table_struct *);
+static int qib_mmapf(struct file *, struct vm_area_struct *);
+
+static const struct file_operations qib_file_ops = {
+ .owner = THIS_MODULE,
+ .write = qib_write,
+ .aio_write = qib_aio_write,
+ .open = qib_open,
+ .release = qib_close,
+ .poll = qib_poll,
+ .mmap = qib_mmapf
+};
+
+/*
+ * Convert kernel virtual addresses to physical addresses so they don't
+ * potentially conflict with the chip addresses used as mmap offsets.
+ * It doesn't really matter what mmap offset we use as long as we can
+ * interpret it correctly.
+ */
+static u64 cvt_kvaddr(void *p)
+{
+ struct page *page;
+ u64 paddr = 0;
+
+ page = vmalloc_to_page(p);
+ if (page)
+ paddr = page_to_pfn(page) << PAGE_SHIFT;
+
+ return paddr;
+}
+
+static int qib_get_base_info(struct file *fp, void __user *ubase,
+ size_t ubase_size)
+{
+ struct qib_ctxtdata *rcd = ctxt_fp(fp);
+ int ret = 0;
+ struct qib_base_info *kinfo = NULL;
+ struct qib_devdata *dd = rcd->dd;
+ struct qib_pportdata *ppd = rcd->ppd;
+ unsigned subctxt_cnt;
+ int shared, master;
+ size_t sz;
+
+ subctxt_cnt = rcd->subctxt_cnt;
+ if (!subctxt_cnt) {
+ shared = 0;
+ master = 0;
+ subctxt_cnt = 1;
+ } else {
+ shared = 1;
+ master = !subctxt_fp(fp);
+ }
+
+ sz = sizeof(*kinfo);
+ /* If context sharing is not requested, allow the old size structure */
+ if (!shared)
+ sz -= 7 * sizeof(u64);
+ if (ubase_size < sz) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
+ if (kinfo == NULL) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ ret = dd->f_get_base_info(rcd, kinfo);
+ if (ret < 0)
+ goto bail;
+
+ kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
+ kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
+ kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
+ kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
+ /*
+ * have to mmap whole thing
+ */
+ kinfo->spi_rcv_egrbuftotlen =
+ rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
+ kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
+ kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
+ rcd->rcvegrbuf_chunks;
+ kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
+ if (master)
+ kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
+ /*
+ * for this use, may be cfgctxts summed over all chips that
+ * are configured and present
+ */
+ kinfo->spi_nctxts = dd->cfgctxts;
+ /* unit (chip/board) our context is on */
+ kinfo->spi_unit = dd->unit;
+ kinfo->spi_port = ppd->port;
+ /* for now, only a single page */
+ kinfo->spi_tid_maxsize = PAGE_SIZE;
+
+ /*
+ * Doing this per context, and based on the skip value, etc. This has
+ * to be the actual buffer size, since the protocol code treats it
+ * as an array.
+ *
+ * These have to be set to user addresses in the user code via mmap.
+ * These values are used on return to user code for the mmap target
+ * addresses only. For 32 bit, same 44 bit address problem, so use
+ * the physical address, not virtual. Before 2.6.11, using the
+ * page_address() macro worked, but in 2.6.11, even that returns the
+ * full 64 bit address (upper bits all 1's). So far, using the
+ * physical addresses (or chip offsets, for chip mapping) works, but
+ * no doubt some future kernel release will change that, and we'll be
+ * on to yet another method of dealing with this.
+ * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
+ * since the chips with non-zero rhf_offset don't normally
+ * enable tail register updates to host memory, but for testing,
+ * both can be enabled and used.
+ */
+ kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
+ kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
+ kinfo->spi_rhf_offset = dd->rhf_offset;
+ kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
+ kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
+ /* setup per-unit (not port) status area for user programs */
+ kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
+ (char *) ppd->statusp -
+ (char *) dd->pioavailregs_dma;
+ kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
+ if (!shared) {
+ kinfo->spi_piocnt = rcd->piocnt;
+ kinfo->spi_piobufbase = (u64) rcd->piobufs;
+ kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
+ } else if (master) {
+ kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
+ (rcd->piocnt % subctxt_cnt);
+ /* Master's PIO buffers are after all the slave's */
+ kinfo->spi_piobufbase = (u64) rcd->piobufs +
+ dd->palign *
+ (rcd->piocnt - kinfo->spi_piocnt);
+ } else {
+ unsigned slave = subctxt_fp(fp) - 1;
+
+ kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
+ kinfo->spi_piobufbase = (u64) rcd->piobufs +
+ dd->palign * kinfo->spi_piocnt * slave;
+ }
+
+ if (shared) {
+ kinfo->spi_sendbuf_status =
+ cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
+ /* only spi_subctxt_* fields should be set in this block! */
+ kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
+
+ kinfo->spi_subctxt_rcvegrbuf =
+ cvt_kvaddr(rcd->subctxt_rcvegrbuf);
+ kinfo->spi_subctxt_rcvhdr_base =
+ cvt_kvaddr(rcd->subctxt_rcvhdr_base);
+ }
+
+ /*
+ * All user buffers are 2KB buffers. If we ever support
+ * giving 4KB buffers to user processes, this will need some
+ * work. Can't use piobufbase directly, because it has
+ * both 2K and 4K buffer base values.
+ */
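+ /*
+ * In other words, the index computed below is just the buffer number
+ * counted in dd->palign-sized steps from the start of the 2K PIO
+ * buffer region.
+ */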
+ kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
+ dd->palign;
+ kinfo->spi_pioalign = dd->palign;
+ kinfo->spi_qpair = QIB_KD_QP;
+ /*
+ * user mode PIO buffers are always 2KB, even when 4KB can
+ * be received, and sent via the kernel; this is ibmaxlen
+ * for 2K MTU.
+ */
+ kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
+ kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
+ kinfo->spi_ctxt = rcd->ctxt;
+ kinfo->spi_subctxt = subctxt_fp(fp);
+ kinfo->spi_sw_version = QIB_KERN_SWVERSION;
+ kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
+ kinfo->spi_hw_version = dd->revision;
+
+ if (master)
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;
+
+ sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
+ if (copy_to_user(ubase, kinfo, sz))
+ ret = -EFAULT;
+bail:
+ kfree(kinfo);
+ return ret;
+}
+
+/**
+ * qib_tid_update - update a context TID
+ * @rcd: the context
+ * @fp: the qib device file
+ * @ti: the TID information
+ *
+ * The new implementation as of Oct 2004 is that the driver assigns
+ * the tid and returns it to the caller. To reduce search time, we
+ * keep a cursor for each context, walking the shadow tid array to find
+ * one that's not in use.
+ *
+ * For now, if we can't allocate the full list, we fail, although
+ * in the long run, we'll allocate as many as we can, and the
+ * caller will deal with that by trying the remaining pages later.
+ * That means that when we fail, we have to mark the tids as not in
+ * use again, in our shadow copy.
+ *
+ * It's up to the caller to free the tids when they are done.
+ * We'll unlock the pages as they free them.
+ *
+ * Also, right now we are locking one page at a time, but since
+ * the intended use of this routine is for a single group of
+ * virtually contiguous pages, that should change to improve
+ * performance.
+ */
+static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
+ const struct qib_tid_info *ti)
+{
+ int ret = 0, ntids;
+ u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
+ u16 *tidlist;
+ struct qib_devdata *dd = rcd->dd;
+ u64 physaddr;
+ unsigned long vaddr;
+ u64 __iomem *tidbase;
+ unsigned long tidmap[8];
+ struct page **pagep = NULL;
+ unsigned subctxt = subctxt_fp(fp);
+
+ if (!dd->pageshadow) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ cnt = ti->tidcnt;
+ if (!cnt) {
+ ret = -EFAULT;
+ goto done;
+ }
+ ctxttid = rcd->ctxt * dd->rcvtidcnt;
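+ /*
+ * When the context is shared, carve its TIDs up among the
+ * subcontexts: each slave gets an equal rcvtidcnt / subctxt_cnt
+ * share, and the master takes the final share plus any remainder.
+ * E.g. (numbers illustrative) with rcvtidcnt == 512 and
+ * subctxt_cnt == 3, slaves 1 and 2 start at offsets 0 and 170 with
+ * 170 TIDs each, and the master starts at offset 340 with 172.
+ */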
+ if (!rcd->subctxt_cnt) {
+ tidcnt = dd->rcvtidcnt;
+ tid = rcd->tidcursor;
+ tidoff = 0;
+ } else if (!subctxt) {
+ tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
+ (dd->rcvtidcnt % rcd->subctxt_cnt);
+ tidoff = dd->rcvtidcnt - tidcnt;
+ ctxttid += tidoff;
+ tid = tidcursor_fp(fp);
+ } else {
+ tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
+ tidoff = tidcnt * (subctxt - 1);
+ ctxttid += tidoff;
+ tid = tidcursor_fp(fp);
+ }
+ if (cnt > tidcnt) {
+ /* make sure it all fits in tid_pg_list */
+ qib_devinfo(dd->pcidev, "Process tried to allocate %u "
+ "TIDs, only trying max (%u)\n", cnt, tidcnt);
+ cnt = tidcnt;
+ }
+ pagep = (struct page **) rcd->tid_pg_list;
+ tidlist = (u16 *) &pagep[dd->rcvtidcnt];
+ pagep += tidoff;
+ tidlist += tidoff;
+
+ memset(tidmap, 0, sizeof(tidmap));
+ /* before decrement; chip actual # */
+ ntids = tidcnt;
+ tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
+ dd->rcvtidbase +
+ ctxttid * sizeof(*tidbase));
+
+ /* virtual address of first page in transfer */
+ vaddr = ti->tidvaddr;
+ if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
+ cnt * PAGE_SIZE)) {
+ ret = -EFAULT;
+ goto done;
+ }
+ ret = qib_get_user_pages(vaddr, cnt, pagep);
+ if (ret) {
+ /*
+ * if (ret == -EBUSY)
+ * We can't continue because the pagep array won't be
+ * initialized. This should never happen,
+ * unless perhaps the user has mpin'ed the pages
+ * themselves.
+ */
+ qib_devinfo(dd->pcidev,
+ "Failed to lock addr %p, %u pages: "
+ "errno %d\n", (void *) vaddr, cnt, -ret);
+ goto done;
+ }
+ for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
+ for (; ntids--; tid++) {
+ if (tid == tidcnt)
+ tid = 0;
+ if (!dd->pageshadow[ctxttid + tid])
+ break;
+ }
+ if (ntids < 0) {
+ /*
+ * Oops, wrapped all the way through their TIDs,
+ * and didn't have enough free; see comments at
+ * start of routine
+ */
+ i--; /* last tidlist[i] not filled in */
+ ret = -ENOMEM;
+ break;
+ }
+ tidlist[i] = tid + tidoff;
+ /* we "know" system pages and TID pages are same size */
+ dd->pageshadow[ctxttid + tid] = pagep[i];
+ dd->physshadow[ctxttid + tid] =
+ qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ /*
+ * don't need atomic or it's overhead
+ */
+ __set_bit(tid, tidmap);
+ physaddr = dd->physshadow[ctxttid + tid];
+ /* PERFORMANCE: below should almost certainly be cached */
+ dd->f_put_tid(dd, &tidbase[tid],
+ RCVHQ_RCV_TYPE_EXPECTED, physaddr);
+ /*
+ * don't check this tid in qib_ctxtshadow, since we
+ * just filled it in; start with the next one.
+ */
+ tid++;
+ }
+
+ if (ret) {
+ u32 limit;
+cleanup:
+ /* jump here if copy out of updated info failed... */
+ /* same code that's in qib_free_tid() */
+ limit = sizeof(tidmap) * BITS_PER_BYTE;
+ if (limit > tidcnt)
+ /* just in case size changes in future */
+ limit = tidcnt;
+ tid = find_first_bit((const unsigned long *)tidmap, limit);
+ for (; tid < limit; tid++) {
+ if (!test_bit(tid, tidmap))
+ continue;
+ if (dd->pageshadow[ctxttid + tid]) {
+ dma_addr_t phys;
+
+ phys = dd->physshadow[ctxttid + tid];
+ dd->physshadow[ctxttid + tid] = dd->tidinvalid;
+ /* PERFORMANCE: below should almost certainly
+ * be cached
+ */
+ dd->f_put_tid(dd, &tidbase[tid],
+ RCVHQ_RCV_TYPE_EXPECTED,
+ dd->tidinvalid);
+ pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ dd->pageshadow[ctxttid + tid] = NULL;
+ }
+ }
+ qib_release_user_pages(pagep, cnt);
+ } else {
+ /*
+ * Copy the updated array, with qib_tid's filled in, back
+ * to user. Since we did the copy in already, this "should
+ * never fail" If it does, we have to clean up...
+ */
+ if (copy_to_user((void __user *)
+ (unsigned long) ti->tidlist,
+ tidlist, cnt * sizeof(*tidlist))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
+ tidmap, sizeof tidmap)) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ if (tid == tidcnt)
+ tid = 0;
+ if (!rcd->subctxt_cnt)
+ rcd->tidcursor = tid;
+ else
+ tidcursor_fp(fp) = tid;
+ }
+
+done:
+ return ret;
+}
+
+/**
+ * qib_tid_free - free a context TID
+ * @rcd: the context
+ * @subctxt: the subcontext
+ * @ti: the TID info
+ *
+ * right now we are unlocking one page at a time, but since
+ * the intended use of this routine is for a single group of
+ * virtually contiguous pages, that should change to improve
+ * performance. We check that the TID is in range for this context
+ * but otherwise don't check validity; if user has an error and
+ * frees the wrong tid, it's only their own data that can thereby
+ * be corrupted. We do check that the TID was in use, for sanity.
+ * We always use our idea of the saved address, not the address that
+ * they pass in to us.
+ */
+static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
+ const struct qib_tid_info *ti)
+{
+ int ret = 0;
+ u32 tid, ctxttid, cnt, limit, tidcnt;
+ struct qib_devdata *dd = rcd->dd;
+ u64 __iomem *tidbase;
+ unsigned long tidmap[8];
+
+ if (!dd->pageshadow) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
+ sizeof tidmap)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ ctxttid = rcd->ctxt * dd->rcvtidcnt;
+ if (!rcd->subctxt_cnt)
+ tidcnt = dd->rcvtidcnt;
+ else if (!subctxt) {
+ tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
+ (dd->rcvtidcnt % rcd->subctxt_cnt);
+ ctxttid += dd->rcvtidcnt - tidcnt;
+ } else {
+ tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
+ ctxttid += tidcnt * (subctxt - 1);
+ }
+ tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
+ dd->rcvtidbase +
+ ctxttid * sizeof(*tidbase));
+
+ limit = sizeof(tidmap) * BITS_PER_BYTE;
+ if (limit > tidcnt)
+ /* just in case size changes in future */
+ limit = tidcnt;
+ tid = find_first_bit(tidmap, limit);
+ for (cnt = 0; tid < limit; tid++) {
+ /*
+ * small optimization; if we detect a run of 3 or so without
+ * any set, use find_first_bit again. That's mainly to
+ * accelerate the case where we wrapped, so we have some at
+ * the beginning, and some at the end, and a big gap
+ * in the middle.
+ */
+ if (!test_bit(tid, tidmap))
+ continue;
+ cnt++;
+ if (dd->pageshadow[ctxttid + tid]) {
+ struct page *p;
+ dma_addr_t phys;
+
+ p = dd->pageshadow[ctxttid + tid];
+ dd->pageshadow[ctxttid + tid] = NULL;
+ phys = dd->physshadow[ctxttid + tid];
+ dd->physshadow[ctxttid + tid] = dd->tidinvalid;
+ /* PERFORMANCE: below should almost certainly be
+ * cached
+ */
+ dd->f_put_tid(dd, &tidbase[tid],
+ RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
+ pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ qib_release_user_pages(&p, 1);
+ }
+ }
+done:
+ return ret;
+}
+
+/**
+ * qib_set_part_key - set a partition key
+ * @rcd: the context
+ * @key: the key
+ *
+ * We can have up to 4 active at a time (other than the default, which is
+ * always allowed). This is somewhat tricky, since multiple contexts may set
+ * the same key, so we reference count them, and clean up at exit. All 4
+ * partition keys are packed into a single qlogic_ib register. It's an
+ * error for a process to set the same pkey multiple times. We provide no
+ * mechanism to de-allocate a pkey at this time, we may eventually need to
+ * do that. I've used the atomic operations, and no locking, and only make
+ * a single pass through what's available. This should be more than
+ * adequate for some time. I'll think about spinlocks or the like if and as
+ * it's necessary.
+ */
+static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
+{
+ struct qib_pportdata *ppd = rcd->ppd;
+ int i, any = 0, pidx = -1;
+ u16 lkey = key & 0x7FFF;
+ int ret;
+
+ if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
+ /* nothing to do; this key always valid */
+ ret = 0;
+ goto bail;
+ }
+
+ if (!lkey) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /*
+ * Set the full membership bit, because it has to be
+ * set in the register or the packet, and it seems
+ * cleaner to set in the register than to force all
+ * callers to set it.
+ */
+ key |= 0x8000;
+
+ for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
+ if (!rcd->pkeys[i] && pidx == -1)
+ pidx = i;
+ if (rcd->pkeys[i] == key) {
+ ret = -EEXIST;
+ goto bail;
+ }
+ }
+ if (pidx == -1) {
+ ret = -EBUSY;
+ goto bail;
+ }
+ for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (!ppd->pkeys[i]) {
+ any++;
+ continue;
+ }
+ if (ppd->pkeys[i] == key) {
+ atomic_t *pkrefs = &ppd->pkeyrefs[i];
+
+ if (atomic_inc_return(pkrefs) > 1) {
+ rcd->pkeys[pidx] = key;
+ ret = 0;
+ goto bail;
+ } else {
+ /*
+ * lost race, decrement count, catch below
+ */
+ atomic_dec(pkrefs);
+ any++;
+ }
+ }
+ if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
+ /*
+ * It makes no sense to have both the limited and
+ * full membership PKEY set at the same time since
+ * the unlimited one will disable the limited one.
+ */
+ ret = -EEXIST;
+ goto bail;
+ }
+ }
+ if (!any) {
+ ret = -EBUSY;
+ goto bail;
+ }
+ for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (!ppd->pkeys[i] &&
+ atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
+ rcd->pkeys[pidx] = key;
+ ppd->pkeys[i] = key;
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
+ ret = 0;
+ goto bail;
+ }
+ }
+ ret = -EBUSY;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_manage_rcvq - manage a context's receive queue
+ * @rcd: the context
+ * @subctxt: the subcontext
+ * @start_stop: action to carry out
+ *
+ * start_stop == 0 disables receive on the context, for use in queue
+ * overflow conditions. start_stop==1 re-enables, to be used to
+ * re-init the software copy of the head register
+ */
+static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
+ int start_stop)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned int rcvctrl_op;
+
+ if (subctxt)
+ goto bail;
+ /* atomically clear receive enable ctxt. */
+ if (start_stop) {
+ /*
+ * On enable, force in-memory copy of the tail register to
+ * 0, so that protocol code doesn't have to worry about
+ * whether or not the chip has yet updated the in-memory
+ * copy on return from the system call. The chip
+ * always resets its tail register back to 0 on a
+ * transition from disabled to enabled.
+ */
+ if (rcd->rcvhdrtail_kvaddr)
+ qib_clear_rcvhdrtail(rcd);
+ rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
+ } else
+ rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
+ dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
+ /* always; new head should be equal to new tail; see above */
+bail:
+ return 0;
+}
+
+static void qib_clean_part_key(struct qib_ctxtdata *rcd,
+ struct qib_devdata *dd)
+{
+ int i, j, pchanged = 0;
+ u64 oldpkey;
+ struct qib_pportdata *ppd = rcd->ppd;
+
+ /* for debugging only */
+ oldpkey = (u64) ppd->pkeys[0] |
+ ((u64) ppd->pkeys[1] << 16) |
+ ((u64) ppd->pkeys[2] << 32) |
+ ((u64) ppd->pkeys[3] << 48);
+
+ for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
+ if (!rcd->pkeys[i])
+ continue;
+ for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
+ /* check for match independent of the global bit */
+ if ((ppd->pkeys[j] & 0x7fff) !=
+ (rcd->pkeys[i] & 0x7fff))
+ continue;
+ if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
+ ppd->pkeys[j] = 0;
+ pchanged++;
+ }
+ break;
+ }
+ rcd->pkeys[i] = 0;
+ }
+ if (pchanged)
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
+}
+
+/* common code for the mappings on dma_alloc_coherent mem */
+static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
+ unsigned len, void *kvaddr, u32 write_ok, char *what)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned long pfn;
+ int ret;
+
+ if ((vma->vm_end - vma->vm_start) > len) {
+ qib_devinfo(dd->pcidev,
+ "FAIL on %s: len %lx > %x\n", what,
+ vma->vm_end - vma->vm_start, len);
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ /*
+ * shared context user code requires rcvhdrq mapped r/w, others
+ * only allowed readonly mapping.
+ */
+ if (!write_ok) {
+ if (vma->vm_flags & VM_WRITE) {
+ qib_devinfo(dd->pcidev,
+ "%s must be mapped readonly\n", what);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ /* don't allow them to later change with mprotect */
+ vma->vm_flags &= ~VM_MAYWRITE;
+ }
+
+ pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
+ ret = remap_pfn_range(vma, vma->vm_start, pfn,
+ len, vma->vm_page_prot);
+ if (ret)
+ qib_devinfo(dd->pcidev, "%s ctxt%u mmap of %lx, %x "
+ "bytes failed: %d\n", what, rcd->ctxt,
+ pfn, len, ret);
+bail:
+ return ret;
+}
+
+static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
+ u64 ureg)
+{
+ unsigned long phys;
+ unsigned long sz;
+ int ret;
+
+ /*
+ * This is real hardware, so use io_remap. This is the mechanism
+ * for the user process to update the head registers for their ctxt
+ * in the chip.
+ */
+ sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
+ if ((vma->vm_end - vma->vm_start) > sz) {
+ qib_devinfo(dd->pcidev, "FAIL mmap userreg: reqlen "
+ "%lx > PAGE\n", vma->vm_end - vma->vm_start);
+ ret = -EFAULT;
+ } else {
+ phys = dd->physaddr + ureg;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ phys >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ }
+ return ret;
+}
+
+static int mmap_piobufs(struct vm_area_struct *vma,
+ struct qib_devdata *dd,
+ struct qib_ctxtdata *rcd,
+ unsigned piobufs, unsigned piocnt)
+{
+ unsigned long phys;
+ int ret;
+
+ /*
+ * When we map the PIO buffers in the chip, we want to map them as
+ * writeonly, no read possible; unfortunately, x86 doesn't allow
+ * for this in hardware, but we still prevent users from asking
+ * for it.
+ */
+ if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
+ qib_devinfo(dd->pcidev, "FAIL mmap piobufs: "
+ "reqlen %lx > PAGE\n",
+ vma->vm_end - vma->vm_start);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ phys = dd->physaddr + piobufs;
+
+#if defined(__powerpc__)
+ /* There isn't a generic way to specify writethrough mappings */
+ pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+ pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
+ pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
+#endif
+
+ /*
+ * don't allow them to later change to readable with mprotect (for when
+ * not initially mapped readable, as is normally the case)
+ */
+ vma->vm_flags &= ~VM_MAYREAD;
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+
+ if (qib_wc_pat)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+bail:
+ return ret;
+}
+
+static int mmap_rcvegrbufs(struct vm_area_struct *vma,
+ struct qib_ctxtdata *rcd)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned long start, size;
+ size_t total_size, i;
+ unsigned long pfn;
+ int ret;
+
+ size = rcd->rcvegrbuf_size;
+ total_size = rcd->rcvegrbuf_chunks * size;
+ if ((vma->vm_end - vma->vm_start) > total_size) {
+ qib_devinfo(dd->pcidev, "FAIL on egr bufs: "
+ "reqlen %lx > actual %lx\n",
+ vma->vm_end - vma->vm_start,
+ (unsigned long) total_size);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (vma->vm_flags & VM_WRITE) {
+ qib_devinfo(dd->pcidev, "Can't map eager buffers as "
+ "writable (flags=%lx)\n", vma->vm_flags);
+ ret = -EPERM;
+ goto bail;
+ }
+ /* don't allow them to later change to writeable with mprotect */
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ start = vma->vm_start;
+
+ for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
+ pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
+ ret = remap_pfn_range(vma, start, pfn, size,
+ vma->vm_page_prot);
+ if (ret < 0)
+ goto bail;
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/*
+ * qib_file_vma_fault - handle a VMA page fault.
+ */
+static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *page;
+
+ page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
+ if (!page)
+ return VM_FAULT_SIGBUS;
+
+ get_page(page);
+ vmf->page = page;
+
+ return 0;
+}
+
+static struct vm_operations_struct qib_file_vm_ops = {
+ .fault = qib_file_vma_fault,
+};
+
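+/*
+ * mmap_kvaddr - map the vmalloc'ed shared-context (subctxt) memory.
+ *
+ * Matches pgaddr against the subctxt uregbase, rcvhdrq, eager buffer
+ * and event mask areas, and sets up a fault-handler based mapping.
+ * Returns 1 on a match, 0 if nothing matched, or a negative errno.
+ */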
+static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
+ struct qib_ctxtdata *rcd, unsigned subctxt)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned subctxt_cnt;
+ unsigned long len;
+ void *addr;
+ size_t size;
+ int ret = 0;
+
+ subctxt_cnt = rcd->subctxt_cnt;
+ size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
+
+ /*
+ * Each process has all the subctxt uregbase, rcvhdrq, and
+ * rcvegrbufs mmapped - as an array for all the processes,
+ * and also separately for this process.
+ */
+ if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
+ addr = rcd->subctxt_uregbase;
+ size = PAGE_SIZE * subctxt_cnt;
+ } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
+ addr = rcd->subctxt_rcvhdr_base;
+ size = rcd->rcvhdrq_size * subctxt_cnt;
+ } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
+ addr = rcd->subctxt_rcvegrbuf;
+ size *= subctxt_cnt;
+ } else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
+ PAGE_SIZE * subctxt)) {
+ addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
+ size = PAGE_SIZE;
+ } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
+ rcd->rcvhdrq_size * subctxt)) {
+ addr = rcd->subctxt_rcvhdr_base +
+ rcd->rcvhdrq_size * subctxt;
+ size = rcd->rcvhdrq_size;
+ } else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
+ addr = rcd->user_event_mask;
+ size = PAGE_SIZE;
+ } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
+ size * subctxt)) {
+ addr = rcd->subctxt_rcvegrbuf + size * subctxt;
+ /* rcvegrbufs are read-only on the slave */
+ if (vma->vm_flags & VM_WRITE) {
+ qib_devinfo(dd->pcidev,
+ "Can't map eager buffers as "
+ "writable (flags=%lx)\n", vma->vm_flags);
+ ret = -EPERM;
+ goto bail;
+ }
+ /*
+ * Don't allow permission to later change to writeable
+ * with mprotect.
+ */
+ vma->vm_flags &= ~VM_MAYWRITE;
+ } else
+ goto bail;
+ len = vma->vm_end - vma->vm_start;
+ if (len > size) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
+ vma->vm_ops = &qib_file_vm_ops;
+ vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+ ret = 1;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_mmapf - mmap various structures into user space
+ * @fp: the file pointer
+ * @vma: the VM area
+ *
+ * We use this to have a shared buffer between the kernel and the user code
+ * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
+ * buffers in the chip. We have the open and close entries so we can bump
+ * the ref count and keep the driver from being unloaded while still mapped.
+ */
+static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
+{
+ struct qib_ctxtdata *rcd;
+ struct qib_devdata *dd;
+ u64 pgaddr, ureg;
+ unsigned piobufs, piocnt;
+ int ret, match = 1;
+
+ rcd = ctxt_fp(fp);
+ if (!rcd || !(vma->vm_flags & VM_SHARED)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ dd = rcd->dd;
+
+ /*
+ * This maps the shared buffers and per-context user registers set up
+ * by qib_do_user_init() into the user process. The address
+ * referred to by vm_pgoff is the file offset passed via mmap().
+ * For shared contexts, this is the kernel vmalloc() address of the
+ * pages to share with the master.
+ * For non-shared or master ctxts, this is a physical address.
+ * We only do one mmap for each space mapped.
+ */
+ pgaddr = vma->vm_pgoff << PAGE_SHIFT;
+
+ /*
+ * Check for 0 in case one of the allocations failed, but user
+ * called mmap anyway.
+ */
+ if (!pgaddr) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /*
+ * Physical addresses must fit in 40 bits for our hardware.
+ * Check for kernel virtual addresses first, anything else must
+ * match a HW or memory address.
+ */
+ ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ goto bail;
+ }
+
+ ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
+ if (!rcd->subctxt_cnt) {
+ /* ctxt is not shared */
+ piocnt = rcd->piocnt;
+ piobufs = rcd->piobufs;
+ } else if (!subctxt_fp(fp)) {
+ /* caller is the master */
+ piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
+ (rcd->piocnt % rcd->subctxt_cnt);
+ piobufs = rcd->piobufs +
+ dd->palign * (rcd->piocnt - piocnt);
+ } else {
+ unsigned slave = subctxt_fp(fp) - 1;
+
+ /* caller is a slave */
+ piocnt = rcd->piocnt / rcd->subctxt_cnt;
+ piobufs = rcd->piobufs + dd->palign * piocnt * slave;
+ }
+
+ if (pgaddr == ureg)
+ ret = mmap_ureg(vma, dd, ureg);
+ else if (pgaddr == piobufs)
+ ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
+ else if (pgaddr == dd->pioavailregs_phys)
+ /* in-memory copy of pioavail registers */
+ ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
+ (void *) dd->pioavailregs_dma, 0,
+ "pioavail registers");
+ else if (pgaddr == rcd->rcvegr_phys)
+ ret = mmap_rcvegrbufs(vma, rcd);
+ else if (pgaddr == (u64) rcd->rcvhdrq_phys)
+ /*
+ * The rcvhdrq itself; multiple pages, contiguous
+ * from an i/o perspective. Shared contexts need
+ * to map r/w, so we allow writing.
+ */
+ ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
+ rcd->rcvhdrq, 1, "rcvhdrq");
+ else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
+ /* in-memory copy of rcvhdrq tail register */
+ ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
+ rcd->rcvhdrtail_kvaddr, 0,
+ "rcvhdrq tail");
+ else
+ match = 0;
+ if (!match)
+ ret = -EINVAL;
+
+ vma->vm_private_data = NULL;
+
+ if (ret < 0)
+ qib_devinfo(dd->pcidev,
+ "mmap Failure %d: off %llx len %lx\n",
+ -ret, (unsigned long long)pgaddr,
+ vma->vm_end - vma->vm_start);
+bail:
+ return ret;
+}
+
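+/*
+ * qib_poll_urgent - poll for urgent packet arrival.
+ *
+ * Reports POLLIN when the urgent counter has advanced since the last
+ * poll; otherwise sets the QIB_CTXT_WAITING_URG flag.
+ */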
+static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
+ struct file *fp,
+ struct poll_table_struct *pt)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned pollflag;
+
+ poll_wait(fp, &rcd->wait, pt);
+
+ spin_lock_irq(&dd->uctxt_lock);
+ if (rcd->urgent != rcd->urgent_poll) {
+ pollflag = POLLIN | POLLRDNORM;
+ rcd->urgent_poll = rcd->urgent;
+ } else {
+ pollflag = 0;
+ set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
+ }
+ spin_unlock_irq(&dd->uctxt_lock);
+
+ return pollflag;
+}
+
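+/*
+ * qib_poll_next - poll for any received packet.
+ *
+ * Reports POLLIN if the receive header queue is non-empty; otherwise
+ * sets QIB_CTXT_WAITING_RCV and enables the receive-available interrupt.
+ */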
+static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
+ struct file *fp,
+ struct poll_table_struct *pt)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned pollflag;
+
+ poll_wait(fp, &rcd->wait, pt);
+
+ spin_lock_irq(&dd->uctxt_lock);
+ if (dd->f_hdrqempty(rcd)) {
+ set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
+ dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
+ pollflag = 0;
+ } else
+ pollflag = POLLIN | POLLRDNORM;
+ spin_unlock_irq(&dd->uctxt_lock);
+
+ return pollflag;
+}
+
+static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
+{
+ struct qib_ctxtdata *rcd;
+ unsigned pollflag;
+
+ rcd = ctxt_fp(fp);
+ if (!rcd)
+ pollflag = POLLERR;
+ else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
+ pollflag = qib_poll_urgent(rcd, fp, pt);
+ else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
+ pollflag = qib_poll_next(rcd, fp, pt);
+ else /* invalid */
+ pollflag = POLLERR;
+
+ return pollflag;
+}
+
+/*
+ * Check that userland and driver are compatible for subcontexts.
+ */
+static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
+{
+ /* this code is written long-hand for clarity */
+ if (QIB_USER_SWMAJOR != user_swmajor) {
+ /* no promise of compatibility if major mismatch */
+ return 0;
+ }
+ if (QIB_USER_SWMAJOR == 1) {
+ switch (QIB_USER_SWMINOR) {
+ case 0:
+ case 1:
+ case 2:
+ /* no subctxt implementation so cannot be compatible */
+ return 0;
+ case 3:
+ /* 3 is only compatible with itself */
+ return user_swminor == 3;
+ default:
+ /* >= 4 are compatible (or are expected to be) */
+ return user_swminor >= 4;
+ }
+ }
+ /* make no promises yet for future major versions */
+ return 0;
+}
+
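+/*
+ * init_subctxts - allocate the shared (subcontext) memory for a master ctxt.
+ *
+ * Checks user/driver compatibility, then vmalloc's the shared uregbase,
+ * rcvhdrq and eager buffer copies sized for the requested number of
+ * subcontexts. Does nothing if no subcontexts were requested.
+ */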
+static int init_subctxts(struct qib_devdata *dd,
+ struct qib_ctxtdata *rcd,
+ const struct qib_user_info *uinfo)
+{
+ int ret = 0;
+ unsigned num_subctxts;
+ size_t size;
+
+ /*
+ * If the user is requesting zero subctxts,
+ * skip the subctxt allocation.
+ */
+ if (uinfo->spu_subctxt_cnt <= 0)
+ goto bail;
+ num_subctxts = uinfo->spu_subctxt_cnt;
+
+ /* Check for subctxt compatibility */
+ if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
+ uinfo->spu_userversion & 0xffff)) {
+ qib_devinfo(dd->pcidev,
+ "Mismatched user version (%d.%d) and driver "
+ "version (%d.%d) while context sharing. Ensure "
+ "that driver and library are from the same "
+ "release.\n",
+ (int) (uinfo->spu_userversion >> 16),
+ (int) (uinfo->spu_userversion & 0xffff),
+ QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
+ goto bail;
+ }
+ if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
+ if (!rcd->subctxt_uregbase) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ /* Note: rcd->rcvhdrq_size isn't initialized yet. */
+ size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
+ sizeof(u32), PAGE_SIZE) * num_subctxts;
+ rcd->subctxt_rcvhdr_base = vmalloc_user(size);
+ if (!rcd->subctxt_rcvhdr_base) {
+ ret = -ENOMEM;
+ goto bail_ureg;
+ }
+
+ rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
+ rcd->rcvegrbuf_size *
+ num_subctxts);
+ if (!rcd->subctxt_rcvegrbuf) {
+ ret = -ENOMEM;
+ goto bail_rhdr;
+ }
+
+ rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
+ rcd->subctxt_id = uinfo->spu_subctxt_id;
+ rcd->active_slaves = 1;
+ rcd->redirect_seq_cnt = 1;
+ set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
+ goto bail;
+
+bail_rhdr:
+ vfree(rcd->subctxt_rcvhdr_base);
+bail_ureg:
+ vfree(rcd->subctxt_uregbase);
+ rcd->subctxt_uregbase = NULL;
+bail:
+ return ret;
+}
+
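+/*
+ * setup_ctxt - create and initialize a new user context on the given port.
+ *
+ * Allocates the ctxtdata and the TID page list used by qib_tid_update(),
+ * sets up any requested subcontexts, and attaches the context to the file.
+ */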
+static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
+ struct file *fp, const struct qib_user_info *uinfo)
+{
+ struct qib_devdata *dd = ppd->dd;
+ struct qib_ctxtdata *rcd;
+ void *ptmp = NULL;
+ int ret;
+
+ rcd = qib_create_ctxtdata(ppd, ctxt);
+
+ /*
+ * Allocate memory for use in qib_tid_update() at open, to
+ * reduce the cost of expected send setup per message segment.
+ */
+ if (rcd)
+ ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
+ dd->rcvtidcnt * sizeof(struct page **),
+ GFP_KERNEL);
+
+ if (!rcd || !ptmp) {
+ qib_dev_err(dd, "Unable to allocate ctxtdata "
+ "memory, failing open\n");
+ ret = -ENOMEM;
+ goto bailerr;
+ }
+ rcd->userversion = uinfo->spu_userversion;
+ ret = init_subctxts(dd, rcd, uinfo);
+ if (ret)
+ goto bailerr;
+ rcd->tid_pg_list = ptmp;
+ rcd->pid = current->pid;
+ init_waitqueue_head(&dd->rcd[ctxt]->wait);
+ strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
+ ctxt_fp(fp) = rcd;
+ qib_stats.sps_ctxts++;
+ ret = 0;
+ goto bail;
+
+bailerr:
+ dd->rcd[ctxt] = NULL;
+ kfree(rcd);
+ kfree(ptmp);
+bail:
+ return ret;
+}
+
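+/*
+ * usable - check whether a port can be given to a user context.
+ *
+ * The device must be present with its registers mapped, and the port
+ * must have a LID and be at least in INIT (or ACTIVE, if active_only).
+ */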
+static inline int usable(struct qib_pportdata *ppd, int active_only)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 linkok = active_only ? QIBL_LINKACTIVE :
+ (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE);
+
+ return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
+ (ppd->lflags & linkok);
+}
+
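+/*
+ * find_free_ctxt - find an unused user context on a specific unit.
+ *
+ * Honors a user-requested port if one was given; otherwise picks a
+ * usable port based on the context number.
+ */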
+static int find_free_ctxt(int unit, struct file *fp,
+ const struct qib_user_info *uinfo)
+{
+ struct qib_devdata *dd = qib_lookup(unit);
+ struct qib_pportdata *ppd = NULL;
+ int ret;
+ u32 ctxt;
+
+ if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ /*
+ * If the user requests a specific port, only try that one port; else
+ * select the "best" port below, based on context.
+ */
+ if (uinfo->spu_port) {
+ ppd = dd->pport + uinfo->spu_port - 1;
+ if (!usable(ppd, 0)) {
+ ret = -ENETDOWN;
+ goto bail;
+ }
+ }
+
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
+ if (dd->rcd[ctxt])
+ continue;
+ /*
+ * The setting and clearing of user context rcd[x] is protected
+ * by the qib_mutex.
+ */
+ if (!ppd) {
+ /* choose port based on ctxt, if up, else 1st up */
+ ppd = dd->pport + (ctxt % dd->num_pports);
+ if (!usable(ppd, 0)) {
+ int i;
+ for (i = 0; i < dd->num_pports; i++) {
+ ppd = dd->pport + i;
+ if (usable(ppd, 0))
+ break;
+ }
+ if (i == dd->num_pports) {
+ ret = -ENETDOWN;
+ goto bail;
+ }
+ }
+ }
+ ret = setup_ctxt(ppd, ctxt, fp, uinfo);
+ goto bail;
+ }
+ ret = -EBUSY;
+
+bail:
+ return ret;
+}
+
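+/*
+ * get_a_ctxt - find an unused user context on any unit.
+ *
+ * Walks all present units looking for a free context on a usable port,
+ * and returns -ENETDOWN, -EBUSY or -ENXIO if none can be found.
+ */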
+static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo)
+{
+ struct qib_pportdata *ppd;
+ int ret = 0, devmax;
+ int npresent, nup;
+ int ndev;
+ u32 port = uinfo->spu_port, ctxt;
+
+ devmax = qib_count_units(&npresent, &nup);
+
+ for (ndev = 0; ndev < devmax; ndev++) {
+ struct qib_devdata *dd = qib_lookup(ndev);
+
+ /* device portion of usable() */
+ if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
+ continue;
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
+ if (dd->rcd[ctxt])
+ continue;
+ if (port) {
+ if (port > dd->num_pports)
+ continue;
+ ppd = dd->pport + port - 1;
+ if (!usable(ppd, 0))
+ continue;
+ } else {
+ /*
+ * choose port based on ctxt, if up, else
+ * first port that's up for multi-port HCA
+ */
+ ppd = dd->pport + (ctxt % dd->num_pports);
+ if (!usable(ppd, 0)) {
+ int j;
+
+ ppd = NULL;
+ for (j = 0; j < dd->num_pports &&
+ !ppd; j++)
+ if (usable(dd->pport + j, 0))
+ ppd = dd->pport + j;
+ if (!ppd)
+ continue; /* to next unit */
+ }
+ }
+ ret = setup_ctxt(ppd, ctxt, fp, uinfo);
+ goto done;
+ }
+ }
+
+ if (npresent) {
+ if (nup == 0)
+ ret = -ENETDOWN;
+ else
+ ret = -EBUSY;
+ } else
+ ret = -ENXIO;
+
+done:
+ return ret;
+}
+
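+/*
+ * find_shared_ctxt - attach to an already-open master context.
+ *
+ * Looks for an open context with a matching subctxt_id, verifies the
+ * sharing parameters match the master's, and joins it as a slave.
+ * Returns 1 on success, 0 if no match, or a negative errno.
+ */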
+static int find_shared_ctxt(struct file *fp,
+ const struct qib_user_info *uinfo)
+{
+ int devmax, ndev, i;
+ int ret = 0;
+
+ devmax = qib_count_units(NULL, NULL);
+
+ for (ndev = 0; ndev < devmax; ndev++) {
+ struct qib_devdata *dd = qib_lookup(ndev);
+
+ /* device portion of usable() */
+ if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
+ continue;
+ for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
+ struct qib_ctxtdata *rcd = dd->rcd[i];
+
+ /* Skip ctxts which are not yet open */
+ if (!rcd || !rcd->cnt)
+ continue;
+ /* Skip ctxt if it doesn't match the requested one */
+ if (rcd->subctxt_id != uinfo->spu_subctxt_id)
+ continue;
+ /* Verify the sharing process matches the master */
+ if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
+ rcd->userversion != uinfo->spu_userversion ||
+ rcd->cnt >= rcd->subctxt_cnt) {
+ ret = -EINVAL;
+ goto done;
+ }
+ ctxt_fp(fp) = rcd;
+ subctxt_fp(fp) = rcd->cnt++;
+ rcd->subpid[subctxt_fp(fp)] = current->pid;
+ tidcursor_fp(fp) = 0;
+ rcd->active_slaves |= 1 << subctxt_fp(fp);
+ ret = 1;
+ goto done;
+ }
+ }
+
+done:
+ return ret;
+}
+
+static int qib_open(struct inode *in, struct file *fp)
+{
+ /* The real work is performed later in qib_assign_ctxt() */
+ fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
+ if (fp->private_data) /* no cpu affinity by default */
+ ((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
+ return fp->private_data ? 0 : -ENOMEM;
+}
+
+/*
+ * Get the ctxt early, so we can set affinity prior to memory allocation.
+ */
+static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
+{
+ int ret;
+ int i_minor;
+ unsigned swmajor, swminor;
+
+ /* Check to be sure we haven't already initialized this file */
+ if (ctxt_fp(fp)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* for now, if major version is different, bail */
+ swmajor = uinfo->spu_userversion >> 16;
+ if (swmajor != QIB_USER_SWMAJOR) {
+ ret = -ENODEV;
+ goto done;
+ }
+
+ swminor = uinfo->spu_userversion & 0xffff;
+
+ mutex_lock(&qib_mutex);
+
+ if (qib_compatible_subctxts(swmajor, swminor) &&
+ uinfo->spu_subctxt_cnt) {
+ ret = find_shared_ctxt(fp, uinfo);
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ goto done_chk_sdma;
+ }
+ }
+
+ i_minor = iminor(fp->f_dentry->d_inode) - QIB_USER_MINOR_BASE;
+ if (i_minor)
+ ret = find_free_ctxt(i_minor - 1, fp, uinfo);
+ else
+ ret = get_a_ctxt(fp, uinfo);
+
+done_chk_sdma:
+ if (!ret) {
+ struct qib_filedata *fd = fp->private_data;
+ const struct qib_ctxtdata *rcd = fd->rcd;
+ const struct qib_devdata *dd = rcd->dd;
+
+ if (dd->flags & QIB_HAS_SEND_DMA) {
+ fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
+ dd->unit,
+ rcd->ctxt,
+ fd->subctxt);
+ if (!fd->pq)
+ ret = -ENOMEM;
+ }
+
+ /*
+ * If the process has NOT already set its affinity, select and
+ * reserve a processor for it, as a rendezvous for all
+ * users of the driver. If they don't actually later
+ * set affinity to this cpu, or set it to some other cpu,
+ * it just means that sooner or later we don't recommend
+ * a cpu, and let the scheduler do its best.
+ */
+ if (!ret && cpus_weight(current->cpus_allowed) >=
+ qib_cpulist_count) {
+ int cpu;
+ cpu = find_first_zero_bit(qib_cpulist,
+ qib_cpulist_count);
+ if (cpu != qib_cpulist_count) {
+ __set_bit(cpu, qib_cpulist);
+ fd->rec_cpu_num = cpu;
+ }
+ } else if (cpus_weight(current->cpus_allowed) == 1 &&
+ test_bit(first_cpu(current->cpus_allowed),
+ qib_cpulist))
+ qib_devinfo(dd->pcidev, "%s PID %u affinity "
+ "set to cpu %d; already allocated\n",
+ current->comm, current->pid,
+ first_cpu(current->cpus_allowed));
+ }
+
+ mutex_unlock(&qib_mutex);
+
+done:
+ return ret;
+}
+
+
+static int qib_do_user_init(struct file *fp,
+ const struct qib_user_info *uinfo)
+{
+ int ret;
+ struct qib_ctxtdata *rcd = ctxt_fp(fp);
+ struct qib_devdata *dd;
+ unsigned uctxt;
+
+ /* Subctxts don't need to initialize anything since master did it. */
+ if (subctxt_fp(fp)) {
+ ret = wait_event_interruptible(rcd->wait,
+ !test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
+ goto bail;
+ }
+
+ dd = rcd->dd;
+
+ /* some ctxts may get extra buffers, calculate that here */
+ uctxt = rcd->ctxt - dd->first_user_ctxt;
+ if (uctxt < dd->ctxts_extrabuf) {
+ rcd->piocnt = dd->pbufsctxt + 1;
+ rcd->pio_base = rcd->piocnt * uctxt;
+ } else {
+ rcd->piocnt = dd->pbufsctxt;
+ rcd->pio_base = rcd->piocnt * uctxt +
+ dd->ctxts_extrabuf;
+ }
+
+ /*
+ * All user buffers are 2KB buffers. If we ever support
+ * giving 4KB buffers to user processes, this will need some
+ * work. Can't use piobufbase directly, because it has
+ * both 2K and 4K buffer base values. So check and handle.
+ */
+ if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
+ if (rcd->pio_base >= dd->piobcnt2k) {
+ qib_dev_err(dd,
+ "%u:ctxt%u: no 2KB buffers available\n",
+ dd->unit, rcd->ctxt);
+ ret = -ENOBUFS;
+ goto bail;
+ }
+ rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
+ qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
+ rcd->ctxt, rcd->piocnt);
+ }
+
+ rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
+ qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
+ TXCHK_CHG_TYPE_USER, rcd);
+ /*
+ * Try to ensure that processes start up with a consistent avail update
+ * for their own range, at least. If the system is very quiet, the
+ * in-memory copy may be out of date at startup for this range of
+ * buffers when a context gets re-used. Do this after the chg_pioavail
+ * and before the rest of setup, so it's "almost certain" the DMA
+ * will have occurred (we can't guarantee it 100%, but with this
+ * ordering it should be many nines), given how much else happens
+ * after this.
+ */
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+
+ /*
+ * Now allocate the rcvhdr Q and eager TIDs; skip the TID
+ * array for the time being. If rcd->ctxt > chip-supported,
+ * we will someday need extra handling here to overflow
+ * through ctxt 0.
+ */
+ ret = qib_create_rcvhdrq(dd, rcd);
+ if (!ret)
+ ret = qib_setup_eagerbufs(rcd);
+ if (ret)
+ goto bail_pio;
+
+ rcd->tidcursor = 0; /* start at beginning after open */
+
+ /* initialize poll variables... */
+ rcd->urgent = 0;
+ rcd->urgent_poll = 0;
+
+ /*
+ * Now enable the ctxt for receive.
+ * Some chips DMA the tail register to memory when it changes
+ * (and when the update bit transitions from 0 to 1); for those
+ * chips, we turn it off and then back on.
+ * This will (very briefly) affect any other open ctxts, but the
+ * duration is very short, and therefore isn't an issue. We
+ * explicitly set the in-memory tail copy to 0 beforehand, so we
+ * don't have to wait to be sure the DMA update has happened
+ * (the chip resets head/tail to 0 on the transition to enable).
+ */
+ if (rcd->rcvhdrtail_kvaddr)
+ qib_clear_rcvhdrtail(rcd);
+
+ dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
+ rcd->ctxt);
+
+ /* Notify any waiting slaves */
+ if (rcd->subctxt_cnt) {
+ clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
+ wake_up(&rcd->wait);
+ }
+ return 0;
+
+bail_pio:
+ qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
+ TXCHK_CHG_TYPE_KERN, rcd);
+bail:
+ return ret;
+}
+
+/**
+ * unlock_expected_tids - unlock any expected TID entries the context still had in use
+ * @rcd: ctxt
+ *
+ * We don't actually update the chip here, because we do a bulk update
+ * below, using f_clear_tids.
+ */
+static void unlock_expected_tids(struct qib_ctxtdata *rcd)
+{
+ struct qib_devdata *dd = rcd->dd;
+ int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
+ int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;
+
+ for (i = ctxt_tidbase; i < maxtid; i++) {
+ struct page *p = dd->pageshadow[i];
+ dma_addr_t phys;
+
+ if (!p)
+ continue;
+
+ phys = dd->physshadow[i];
+ dd->physshadow[i] = dd->tidinvalid;
+ dd->pageshadow[i] = NULL;
+ pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ qib_release_user_pages(&p, 1);
+ cnt++;
+ }
+}
+
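+/*
+ * qib_close - release a user context (or subcontext) on close.
+ *
+ * Slaves just drop their reference; the last closer disables receive
+ * for the context, disarms its PIO buffers, releases expected TIDs,
+ * and frees the ctxtdata.
+ */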
+static int qib_close(struct inode *in, struct file *fp)
+{
+ int ret = 0;
+ struct qib_filedata *fd;
+ struct qib_ctxtdata *rcd;
+ struct qib_devdata *dd;
+ unsigned long flags;
+ unsigned ctxt;
+ pid_t pid;
+
+ mutex_lock(&qib_mutex);
+
+ fd = (struct qib_filedata *) fp->private_data;
+ fp->private_data = NULL;
+ rcd = fd->rcd;
+ if (!rcd) {
+ mutex_unlock(&qib_mutex);
+ goto bail;
+ }
+
+ dd = rcd->dd;
+
+ /* ensure all pio buffer writes in progress are flushed */
+ qib_flush_wc();
+
+ /* drain user sdma queue */
+ if (fd->pq) {
+ qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
+ qib_user_sdma_queue_destroy(fd->pq);
+ }
+
+ if (fd->rec_cpu_num != -1)
+ __clear_bit(fd->rec_cpu_num, qib_cpulist);
+
+ if (--rcd->cnt) {
+ /*
+ * XXX If the master closes the context before the slave(s),
+ * revoke the mmap for the eager receive queue so
+ * the slave(s) don't wait for receive data forever.
+ */
+ rcd->active_slaves &= ~(1 << fd->subctxt);
+ rcd->subpid[fd->subctxt] = 0;
+ mutex_unlock(&qib_mutex);
+ goto bail;
+ }
+
+ /* early; no interrupt users after this */
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ ctxt = rcd->ctxt;
+ dd->rcd[ctxt] = NULL;
+ pid = rcd->pid;
+ rcd->pid = 0;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+
+ if (rcd->rcvwait_to || rcd->piowait_to ||
+ rcd->rcvnowait || rcd->pionowait) {
+ rcd->rcvwait_to = 0;
+ rcd->piowait_to = 0;
+ rcd->rcvnowait = 0;
+ rcd->pionowait = 0;
+ }
+ if (rcd->flag)
+ rcd->flag = 0;
+
+ if (dd->kregbase) {
+ /* atomically clear receive enable ctxt and intr avail. */
+ dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
+ QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);
+
+ /* clean up the pkeys for this ctxt user */
+ qib_clean_part_key(rcd, dd);
+ qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
+ qib_chg_pioavailkernel(dd, rcd->pio_base,
+ rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);
+
+ dd->f_clear_tids(dd, rcd);
+
+ if (dd->pageshadow)
+ unlock_expected_tids(rcd);
+ qib_stats.sps_ctxts--;
+ }
+
+ mutex_unlock(&qib_mutex);
+ qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */
+
+bail:
+ kfree(fd);
+ return ret;
+}
+
+static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
+{
+ struct qib_ctxt_info info;
+ int ret;
+ size_t sz;
+ struct qib_ctxtdata *rcd = ctxt_fp(fp);
+ struct qib_filedata *fd;
+
+ fd = (struct qib_filedata *) fp->private_data;
+
+ info.num_active = qib_count_active_units();
+ info.unit = rcd->dd->unit;
+ info.port = rcd->ppd->port;
+ info.ctxt = rcd->ctxt;
+ info.subctxt = subctxt_fp(fp);
+ /* Number of user ctxts available for this device. */
+ info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
+ info.num_subctxts = rcd->subctxt_cnt;
+ info.rec_cpu = fd->rec_cpu_num;
+ sz = sizeof(info);
+
+ if (copy_to_user(uinfo, &info, sz)) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
+ u32 __user *inflightp)
+{
+ const u32 val = qib_user_sdma_inflight_counter(pq);
+
+ if (put_user(val, inflightp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int qib_sdma_get_complete(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq,
+ u32 __user *completep)
+{
+ u32 val;
+ int err;
+
+ if (!pq)
+ return -EINVAL;
+
+ err = qib_user_sdma_make_progress(ppd, pq);
+ if (err < 0)
+ return err;
+
+ val = qib_user_sdma_complete_counter(pq);
+ if (put_user(val, completep))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int disarm_req_delay(struct qib_ctxtdata *rcd)
+{
+ int ret = 0;
+
+ if (!usable(rcd->ppd, 1)) {
+ int i;
+ /*
+ * If the link is down, or otherwise not usable, delay
+ * the caller up to 30 seconds, so we don't thrash
+ * trying to get the chip back to ACTIVE, and
+ * set a flag so they make the call again.
+ */
+ if (rcd->user_event_mask) {
+ /*
+ * subctxt_cnt is 0 if not shared, so do base
+ * separately, first, then remaining subctxt, if any
+ */
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[0]);
+ for (i = 1; i < rcd->subctxt_cnt; i++)
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[i]);
+ }
+ for (i = 0; !usable(rcd->ppd, 1) && i < 300; i++)
+ msleep(100);
+ ret = -ENETDOWN;
+ }
+ return ret;
+}
+
+/*
+ * Find all user contexts in use, and set the specified bit in their
+ * event mask.
+ * See also find_ctxt() for a similar use that is specific to send buffers.
+ */
+int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
+{
+ struct qib_ctxtdata *rcd;
+ unsigned ctxt;
+ int ret = 0;
+
+ spin_lock(&ppd->dd->uctxt_lock);
+ for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
+ ctxt++) {
+ rcd = ppd->dd->rcd[ctxt];
+ if (!rcd)
+ continue;
+ if (rcd->user_event_mask) {
+ int i;
+ /*
+ * subctxt_cnt is 0 if not shared, so do base
+ * separately, first, then remaining subctxt, if any
+ */
+ set_bit(evtbit, &rcd->user_event_mask[0]);
+ for (i = 1; i < rcd->subctxt_cnt; i++)
+ set_bit(evtbit, &rcd->user_event_mask[i]);
+ }
+ ret = 1;
+ break;
+ }
+ spin_unlock(&ppd->dd->uctxt_lock);
+
+ return ret;
+}
+
+/*
+ * Clear the event notifier events for this context.
+ * For the DISARM_BUFS case, we also take action (this obsoletes
+ * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
+ * compatibility).
+ * Other bits don't currently require actions; we just clear them atomically.
+ * The user process then performs whatever actions are appropriate to the
+ * bit having been set, if desired, and checks again in future.
+ */
+static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
+ unsigned long events)
+{
+ int ret = 0, i;
+
+ for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
+ if (!test_bit(i, &events))
+ continue;
+ if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
+ (void)qib_disarm_piobufs_ifneeded(rcd);
+ ret = disarm_req_delay(rcd);
+ } else
+ clear_bit(i, &rcd->user_event_mask[subctxt]);
+ }
+ return ret;
+}
+
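+/*
+ * qib_write - the driver's command channel.
+ *
+ * User commands arrive as a struct qib_cmd written to the device file;
+ * the type selects which member of the union is copied in and which
+ * handler is invoked. Returns the number of bytes consumed or an errno.
+ */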
+static ssize_t qib_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off)
+{
+ const struct qib_cmd __user *ucmd;
+ struct qib_ctxtdata *rcd;
+ const void __user *src;
+ size_t consumed, copy = 0;
+ struct qib_cmd cmd;
+ ssize_t ret = 0;
+ void *dest;
+
+ if (count < sizeof(cmd.type)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ ucmd = (const struct qib_cmd __user *) data;
+
+ if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ consumed = sizeof(cmd.type);
+
+ switch (cmd.type) {
+ case QIB_CMD_ASSIGN_CTXT:
+ case QIB_CMD_USER_INIT:
+ copy = sizeof(cmd.cmd.user_info);
+ dest = &cmd.cmd.user_info;
+ src = &ucmd->cmd.user_info;
+ break;
+
+ case QIB_CMD_RECV_CTRL:
+ copy = sizeof(cmd.cmd.recv_ctrl);
+ dest = &cmd.cmd.recv_ctrl;
+ src = &ucmd->cmd.recv_ctrl;
+ break;
+
+ case QIB_CMD_CTXT_INFO:
+ copy = sizeof(cmd.cmd.ctxt_info);
+ dest = &cmd.cmd.ctxt_info;
+ src = &ucmd->cmd.ctxt_info;
+ break;
+
+ case QIB_CMD_TID_UPDATE:
+ case QIB_CMD_TID_FREE:
+ copy = sizeof(cmd.cmd.tid_info);
+ dest = &cmd.cmd.tid_info;
+ src = &ucmd->cmd.tid_info;
+ break;
+
+ case QIB_CMD_SET_PART_KEY:
+ copy = sizeof(cmd.cmd.part_key);
+ dest = &cmd.cmd.part_key;
+ src = &ucmd->cmd.part_key;
+ break;
+
+ case QIB_CMD_DISARM_BUFS:
+ case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
+ copy = 0;
+ src = NULL;
+ dest = NULL;
+ break;
+
+ case QIB_CMD_POLL_TYPE:
+ copy = sizeof(cmd.cmd.poll_type);
+ dest = &cmd.cmd.poll_type;
+ src = &ucmd->cmd.poll_type;
+ break;
+
+ case QIB_CMD_ARMLAUNCH_CTRL:
+ copy = sizeof(cmd.cmd.armlaunch_ctrl);
+ dest = &cmd.cmd.armlaunch_ctrl;
+ src = &ucmd->cmd.armlaunch_ctrl;
+ break;
+
+ case QIB_CMD_SDMA_INFLIGHT:
+ copy = sizeof(cmd.cmd.sdma_inflight);
+ dest = &cmd.cmd.sdma_inflight;
+ src = &ucmd->cmd.sdma_inflight;
+ break;
+
+ case QIB_CMD_SDMA_COMPLETE:
+ copy = sizeof(cmd.cmd.sdma_complete);
+ dest = &cmd.cmd.sdma_complete;
+ src = &ucmd->cmd.sdma_complete;
+ break;
+
+ case QIB_CMD_ACK_EVENT:
+ copy = sizeof(cmd.cmd.event_mask);
+ dest = &cmd.cmd.event_mask;
+ src = &ucmd->cmd.event_mask;
+ break;
+
+ default:
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (copy) {
+ if ((count - consumed) < copy) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (copy_from_user(dest, src, copy)) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ consumed += copy;
+ }
+
+ rcd = ctxt_fp(fp);
+ if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ switch (cmd.type) {
+ case QIB_CMD_ASSIGN_CTXT:
+ ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
+ if (ret)
+ goto bail;
+ break;
+
+ case QIB_CMD_USER_INIT:
+ ret = qib_do_user_init(fp, &cmd.cmd.user_info);
+ if (ret)
+ goto bail;
+ ret = qib_get_base_info(fp, (void __user *) (unsigned long)
+ cmd.cmd.user_info.spu_base_info,
+ cmd.cmd.user_info.spu_base_info_size);
+ break;
+
+ case QIB_CMD_RECV_CTRL:
+ ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
+ break;
+
+ case QIB_CMD_CTXT_INFO:
+ ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
+ (unsigned long) cmd.cmd.ctxt_info);
+ break;
+
+ case QIB_CMD_TID_UPDATE:
+ ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
+ break;
+
+ case QIB_CMD_TID_FREE:
+ ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
+ break;
+
+ case QIB_CMD_SET_PART_KEY:
+ ret = qib_set_part_key(rcd, cmd.cmd.part_key);
+ break;
+
+ case QIB_CMD_DISARM_BUFS:
+ (void)qib_disarm_piobufs_ifneeded(rcd);
+ ret = disarm_req_delay(rcd);
+ break;
+
+ case QIB_CMD_PIOAVAILUPD:
+ qib_force_pio_avail_update(rcd->dd);
+ break;
+
+ case QIB_CMD_POLL_TYPE:
+ rcd->poll_type = cmd.cmd.poll_type;
+ break;
+
+ case QIB_CMD_ARMLAUNCH_CTRL:
+ rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
+ break;
+
+ case QIB_CMD_SDMA_INFLIGHT:
+ ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
+ (u32 __user *) (unsigned long)
+ cmd.cmd.sdma_inflight);
+ break;
+
+ case QIB_CMD_SDMA_COMPLETE:
+ ret = qib_sdma_get_complete(rcd->ppd,
+ user_sdma_queue_fp(fp),
+ (u32 __user *) (unsigned long)
+ cmd.cmd.sdma_complete);
+ break;
+
+ case QIB_CMD_ACK_EVENT:
+ ret = qib_user_event_ack(rcd, subctxt_fp(fp),
+ cmd.cmd.event_mask);
+ break;
+ }
+
+ if (ret >= 0)
+ ret = consumed;
+
+bail:
+ return ret;
+}
+
+static ssize_t qib_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long dim, loff_t off)
+{
+ struct qib_filedata *fp = iocb->ki_filp->private_data;
+ struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
+ struct qib_user_sdma_queue *pq = fp->pq;
+
+ if (!dim || !pq)
+ return -EINVAL;
+
+ return qib_user_sdma_writev(rcd, pq, iov, dim);
+}
+
+static struct class *qib_class;
+static dev_t qib_dev;
+
+int qib_cdev_init(int minor, const char *name,
+ const struct file_operations *fops,
+ struct cdev **cdevp, struct device **devp)
+{
+ const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
+ struct cdev *cdev;
+ struct device *device = NULL;
+ int ret;
+
+ cdev = cdev_alloc();
+ if (!cdev) {
+ printk(KERN_ERR QIB_DRV_NAME
+ ": Could not allocate cdev for minor %d, %s\n",
+ minor, name);
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ cdev->owner = THIS_MODULE;
+ cdev->ops = fops;
+ kobject_set_name(&cdev->kobj, name);
+
+ ret = cdev_add(cdev, dev, 1);
+ if (ret < 0) {
+ printk(KERN_ERR QIB_DRV_NAME
+ ": Could not add cdev for minor %d, %s (err %d)\n",
+ minor, name, -ret);
+ goto err_cdev;
+ }
+
+ device = device_create(qib_class, NULL, dev, NULL, name);
+ if (!IS_ERR(device))
+ goto done;
+ ret = PTR_ERR(device);
+ device = NULL;
+ printk(KERN_ERR QIB_DRV_NAME ": Could not create "
+ "device for minor %d, %s (err %d)\n",
+ minor, name, -ret);
+err_cdev:
+ cdev_del(cdev);
+ cdev = NULL;
+done:
+ *cdevp = cdev;
+ *devp = device;
+ return ret;
+}
+
+void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
+{
+ struct device *device = *devp;
+
+ if (device) {
+ device_unregister(device);
+ *devp = NULL;
+ }
+
+ if (*cdevp) {
+ cdev_del(*cdevp);
+ *cdevp = NULL;
+ }
+}
+
+static struct cdev *wildcard_cdev;
+static struct device *wildcard_device;
+
+int __init qib_dev_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
+ if (ret < 0) {
+ printk(KERN_ERR QIB_DRV_NAME ": Could not allocate "
+ "chrdev region (err %d)\n", -ret);
+ goto done;
+ }
+
+ qib_class = class_create(THIS_MODULE, "ipath");
+ if (IS_ERR(qib_class)) {
+ ret = PTR_ERR(qib_class);
+ printk(KERN_ERR QIB_DRV_NAME ": Could not create "
+ "device class (err %d)\n", -ret);
+ unregister_chrdev_region(qib_dev, QIB_NMINORS);
+ }
+
+done:
+ return ret;
+}
+
+void qib_dev_cleanup(void)
+{
+ if (qib_class) {
+ class_destroy(qib_class);
+ qib_class = NULL;
+ }
+
+ unregister_chrdev_region(qib_dev, QIB_NMINORS);
+}
+
+static atomic_t user_count = ATOMIC_INIT(0);
+
+static void qib_user_remove(struct qib_devdata *dd)
+{
+ if (atomic_dec_return(&user_count) == 0)
+ qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);
+
+ qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
+}
+
+static int qib_user_add(struct qib_devdata *dd)
+{
+ char name[10];
+ int ret;
+
+ if (atomic_inc_return(&user_count) == 1) {
+ ret = qib_cdev_init(0, "ipath", &qib_file_ops,
+ &wildcard_cdev, &wildcard_device);
+ if (ret)
+ goto done;
+ }
+
+ snprintf(name, sizeof(name), "ipath%d", dd->unit);
+ ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
+ &dd->user_cdev, &dd->user_device);
+ if (ret)
+ qib_user_remove(dd);
+done:
+ return ret;
+}
+
+/*
+ * Create per-unit files in /dev
+ */
+int qib_device_create(struct qib_devdata *dd)
+{
+ int r, ret;
+
+ r = qib_user_add(dd);
+ ret = qib_diag_add(dd);
+ if (r && !ret)
+ ret = r;
+ return ret;
+}
+
+/*
+ * Remove per-unit files in /dev
+ * Returns void; the core kernel returns no errors for this stuff.
+ */
+void qib_device_remove(struct qib_devdata *dd)
+{
+ qib_user_remove(dd);
+ qib_diag_remove(dd);
+}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
new file mode 100644
index 000000000000..844954bf417b
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -0,0 +1,616 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+
+#include "qib.h"
+
+#define QIBFS_MAGIC 0x726a77
+
+static struct super_block *qib_super;
+
+#define private2dd(file) ((file)->f_dentry->d_inode->i_private)
+
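+/*
+ * qibfs_mknod - create an inode for a qibfs file or directory.
+ *
+ * Allocates an inode on the superblock, fills in the mode, times and
+ * private data pointer, and instantiates it on the dentry.
+ */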
+static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
+ int mode, const struct file_operations *fops,
+ void *data)
+{
+ int error;
+ struct inode *inode = new_inode(dir->i_sb);
+
+ if (!inode) {
+ error = -EPERM;
+ goto bail;
+ }
+
+ inode->i_mode = mode;
+ inode->i_uid = 0;
+ inode->i_gid = 0;
+ inode->i_blocks = 0;
+ inode->i_atime = CURRENT_TIME;
+ inode->i_mtime = inode->i_atime;
+ inode->i_ctime = inode->i_atime;
+ inode->i_private = data;
+ if ((mode & S_IFMT) == S_IFDIR) {
+ inode->i_op = &simple_dir_inode_operations;
+ inc_nlink(inode);
+ inc_nlink(dir);
+ }
+
+ inode->i_fop = fops;
+
+ d_instantiate(dentry, inode);
+ error = 0;
+
+bail:
+ return error;
+}
+
+static int create_file(const char *name, mode_t mode,
+ struct dentry *parent, struct dentry **dentry,
+ const struct file_operations *fops, void *data)
+{
+ int error;
+
+ *dentry = NULL;
+ mutex_lock(&parent->d_inode->i_mutex);
+ *dentry = lookup_one_len(name, parent, strlen(name));
+ if (!IS_ERR(*dentry))
+ error = qibfs_mknod(parent->d_inode, *dentry,
+ mode, fops, data);
+ else
+ error = PTR_ERR(*dentry);
+ mutex_unlock(&parent->d_inode->i_mutex);
+
+ return error;
+}
+
+static ssize_t driver_stats_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return simple_read_from_buffer(buf, count, ppos, &qib_stats,
+ sizeof qib_stats);
+}
+
+/*
+ * Driver stats field names, one line per stat, single string. Used by
+ * programs like ipathstats to print the stats in a way that works for
+ * different versions of drivers, without changing program source.
+ * If qlogic_ib_stats changes, this needs to change. Names need to be
+ * 12 chars or less (w/o newline), for proper display by the ipathstats utility.
+ */
+static const char qib_statnames[] =
+ "KernIntr\n"
+ "ErrorIntr\n"
+ "Tx_Errs\n"
+ "Rcv_Errs\n"
+ "H/W_Errs\n"
+ "NoPIOBufs\n"
+ "CtxtsOpen\n"
+ "RcvLen_Errs\n"
+ "EgrBufFull\n"
+ "EgrHdrFull\n"
+ ;
+
+static ssize_t driver_names_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return simple_read_from_buffer(buf, count, ppos, qib_statnames,
+ sizeof qib_statnames - 1); /* no null */
+}
+
+static const struct file_operations driver_ops[] = {
+ { .read = driver_stats_read, },
+ { .read = driver_names_read, },
+};
+
+/* read the per-device counters */
+static ssize_t dev_counters_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u64 *counters;
+ size_t avail;
+ struct qib_devdata *dd = private2dd(file);
+
+ avail = dd->f_read_cntrs(dd, *ppos, NULL, &counters);
+ return simple_read_from_buffer(buf, count, ppos, counters, avail);
+}
+
+/* read the per-device counters */
+static ssize_t dev_names_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *names;
+ size_t avail;
+ struct qib_devdata *dd = private2dd(file);
+
+ avail = dd->f_read_cntrs(dd, *ppos, &names, NULL);
+ return simple_read_from_buffer(buf, count, ppos, names, avail);
+}
+
+static const struct file_operations cntr_ops[] = {
+ { .read = dev_counters_read, },
+ { .read = dev_names_read, },
+};
+
+/*
+ * Could use file->f_dentry->d_inode->i_ino to figure out which file,
+ * instead of a separate routine for each, but for now this works...
+ */
+
+/* read the per-port names (same for each port) */
+static ssize_t portnames_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *names;
+ size_t avail;
+ struct qib_devdata *dd = private2dd(file);
+
+ avail = dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL);
+ return simple_read_from_buffer(buf, count, ppos, names, avail);
+}
+
+/* read the per-port counters for port 1 (pidx 0) */
+static ssize_t portcntrs_1_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u64 *counters;
+ size_t avail;
+ struct qib_devdata *dd = private2dd(file);
+
+ avail = dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters);
+ return simple_read_from_buffer(buf, count, ppos, counters, avail);
+}
+
+/* read the per-port counters for port 2 (pidx 1) */
+static ssize_t portcntrs_2_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u64 *counters;
+ size_t avail;
+ struct qib_devdata *dd = private2dd(file);
+
+ avail = dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters);
+ return simple_read_from_buffer(buf, count, ppos, counters, avail);
+}
+
+static const struct file_operations portcntr_ops[] = {
+ { .read = portnames_read, },
+ { .read = portcntrs_1_read, },
+ { .read = portcntrs_2_read, },
+};
+
+/*
+ * read the per-port QSFP data for port 1 (pidx 0)
+ */
+static ssize_t qsfp_1_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct qib_devdata *dd = private2dd(file);
+ char *tmp;
+ int ret;
+
+ tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = qib_qsfp_dump(dd->pport, tmp, PAGE_SIZE);
+ if (ret > 0)
+ ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
+ kfree(tmp);
+ return ret;
+}
+
+/*
+ * read the per-port QSFP data for port 2 (pidx 1)
+ */
+static ssize_t qsfp_2_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct qib_devdata *dd = private2dd(file);
+ char *tmp;
+ int ret;
+
+ if (dd->num_pports < 2)
+ return -ENODEV;
+
+ tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = qib_qsfp_dump(dd->pport + 1, tmp, PAGE_SIZE);
+ if (ret > 0)
+ ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
+ kfree(tmp);
+ return ret;
+}
+
+static const struct file_operations qsfp_ops[] = {
+ { .read = qsfp_1_read, },
+ { .read = qsfp_2_read, },
+};
+
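+/*
+ * flash_read - read the board's EEPROM/flash contents via qibfs.
+ *
+ * Reads are bounded by sizeof(struct qib_flash); the data is fetched
+ * with qib_eeprom_read() into a temporary buffer and copied to user space.
+ */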
+static ssize_t flash_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct qib_devdata *dd;
+ ssize_t ret;
+ loff_t pos;
+ char *tmp;
+
+ pos = *ppos;
+
+ if (pos < 0) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (pos >= sizeof(struct qib_flash)) {
+ ret = 0;
+ goto bail;
+ }
+
+ if (count > sizeof(struct qib_flash) - pos)
+ count = sizeof(struct qib_flash) - pos;
+
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ dd = private2dd(file);
+ if (qib_eeprom_read(dd, pos, tmp, count)) {
+ qib_dev_err(dd, "failed to read from flash\n");
+ ret = -ENXIO;
+ goto bail_tmp;
+ }
+
+ if (copy_to_user(buf, tmp, count)) {
+ ret = -EFAULT;
+ goto bail_tmp;
+ }
+
+ *ppos = pos + count;
+ ret = count;
+
+bail_tmp:
+ kfree(tmp);
+
+bail:
+ return ret;
+}
+
+static ssize_t flash_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct qib_devdata *dd;
+ ssize_t ret;
+ loff_t pos;
+ char *tmp;
+
+ pos = *ppos;
+
+ if (pos != 0) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (count != sizeof(struct qib_flash)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ if (copy_from_user(tmp, buf, count)) {
+ ret = -EFAULT;
+ goto bail_tmp;
+ }
+
+ dd = private2dd(file);
+ if (qib_eeprom_write(dd, pos, tmp, count)) {
+ ret = -ENXIO;
+ qib_dev_err(dd, "failed to write to flash\n");
+ goto bail_tmp;
+ }
+
+ *ppos = pos + count;
+ ret = count;
+
+bail_tmp:
+ kfree(tmp);
+
+bail:
+ return ret;
+}
+
+static const struct file_operations flash_ops = {
+ .read = flash_read,
+ .write = flash_write,
+};
+
+static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
+{
+ struct dentry *dir, *tmp;
+ char unit[10];
+ int ret, i;
+
+ /* create the per-unit directory */
+ snprintf(unit, sizeof unit, "%u", dd->unit);
+ ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
+ &simple_dir_operations, dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
+ goto bail;
+ }
+
+ /* create the files in the new directory */
+ ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp,
+ &cntr_ops[0], dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/counters) failed: %d\n",
+ unit, ret);
+ goto bail;
+ }
+ ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp,
+ &cntr_ops[1], dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/counter_names) failed: %d\n",
+ unit, ret);
+ goto bail;
+ }
+ ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp,
+ &portcntr_ops[0], dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+ unit, "portcounter_names", ret);
+ goto bail;
+ }
+ for (i = 1; i <= dd->num_pports; i++) {
+ char fname[24];
+
+ sprintf(fname, "port%dcounters", i);
+ /* create the files in the new directory */
+ ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
+ &portcntr_ops[i], dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+ unit, fname, ret);
+ goto bail;
+ }
+ if (!(dd->flags & QIB_HAS_QSFP))
+ continue;
+ sprintf(fname, "qsfp%d", i);
+ ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
+ &qsfp_ops[i - 1], dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+ unit, fname, ret);
+ goto bail;
+ }
+ }
+
+ ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
+ &flash_ops, dd);
+ if (ret)
+ printk(KERN_ERR "create_file(%s/flash) failed: %d\n",
+ unit, ret);
+bail:
+ return ret;
+}
+
+static int remove_file(struct dentry *parent, char *name)
+{
+ struct dentry *tmp;
+ int ret;
+
+ tmp = lookup_one_len(name, parent, strlen(name));
+
+ if (IS_ERR(tmp)) {
+ ret = PTR_ERR(tmp);
+ goto bail;
+ }
+
+ spin_lock(&dcache_lock);
+ spin_lock(&tmp->d_lock);
+ if (!(d_unhashed(tmp) && tmp->d_inode)) {
+ dget_locked(tmp);
+ __d_drop(tmp);
+ spin_unlock(&tmp->d_lock);
+ spin_unlock(&dcache_lock);
+ simple_unlink(parent->d_inode, tmp);
+ } else {
+ spin_unlock(&tmp->d_lock);
+ spin_unlock(&dcache_lock);
+ }
+
+ ret = 0;
+bail:
+ /*
+ * We don't expect clients to care about the return value, but
+ * it's there if they need it.
+ */
+ return ret;
+}
+
+static int remove_device_files(struct super_block *sb,
+ struct qib_devdata *dd)
+{
+ struct dentry *dir, *root;
+ char unit[10];
+ int ret, i;
+
+ root = dget(sb->s_root);
+ mutex_lock(&root->d_inode->i_mutex);
+ snprintf(unit, sizeof unit, "%u", dd->unit);
+ dir = lookup_one_len(unit, root, strlen(unit));
+
+ if (IS_ERR(dir)) {
+ ret = PTR_ERR(dir);
+ printk(KERN_ERR "Lookup of %s failed\n", unit);
+ goto bail;
+ }
+
+ remove_file(dir, "counters");
+ remove_file(dir, "counter_names");
+ remove_file(dir, "portcounter_names");
+ for (i = 0; i < dd->num_pports; i++) {
+ char fname[24];
+
+ sprintf(fname, "port%dcounters", i + 1);
+ remove_file(dir, fname);
+ if (dd->flags & QIB_HAS_QSFP) {
+ sprintf(fname, "qsfp%d", i + 1);
+ remove_file(dir, fname);
+ }
+ }
+ remove_file(dir, "flash");
+ d_delete(dir);
+ ret = simple_rmdir(root->d_inode, dir);
+
+bail:
+ mutex_unlock(&root->d_inode->i_mutex);
+ dput(root);
+ return ret;
+}
+
+/*
+ * This fills everything in when the fs is mounted, to handle umount/mount
+ * after device init. The direct add_cntr_files() call handles adding
+ * them from the init code, when the fs is already mounted.
+ */
+static int qibfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct qib_devdata *dd, *tmp;
+ unsigned long flags;
+ int ret;
+
+ static struct tree_descr files[] = {
+ [2] = {"driver_stats", &driver_ops[0], S_IRUGO},
+ [3] = {"driver_stats_names", &driver_ops[1], S_IRUGO},
+ {""},
+ };
+
+ ret = simple_fill_super(sb, QIBFS_MAGIC, files);
+ if (ret) {
+ printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
+ goto bail;
+ }
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+
+ list_for_each_entry_safe(dd, tmp, &qib_dev_list, list) {
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+ ret = add_cntr_files(sb, dd);
+ if (ret)
+ goto bail;
+ spin_lock_irqsave(&qib_devs_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+bail:
+ return ret;
+}
+
+static int qibfs_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data, struct vfsmount *mnt)
+{
+ int ret = get_sb_single(fs_type, flags, data,
+ qibfs_fill_super, mnt);
+ if (ret >= 0)
+ qib_super = mnt->mnt_sb;
+ return ret;
+}
+
+static void qibfs_kill_super(struct super_block *s)
+{
+ kill_litter_super(s);
+ qib_super = NULL;
+}
+
+int qibfs_add(struct qib_devdata *dd)
+{
+ int ret;
+
+ /*
+ * On first unit initialized, qib_super will not yet exist
+ * because nobody has yet tried to mount the filesystem, so
+ * we can't consider that to be an error; if an error occurs
+ * during the mount, that will get a complaint, so this is OK.
+ * add_cntr_files() for all units is done at mount from
+ * qibfs_fill_super(), so one way or another, everything works.
+ */
+ if (qib_super == NULL)
+ ret = 0;
+ else
+ ret = add_cntr_files(qib_super, dd);
+ return ret;
+}
+
+int qibfs_remove(struct qib_devdata *dd)
+{
+ int ret = 0;
+
+ if (qib_super)
+ ret = remove_device_files(qib_super, dd);
+
+ return ret;
+}
+
+static struct file_system_type qibfs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "ipathfs",
+ .get_sb = qibfs_get_sb,
+ .kill_sb = qibfs_kill_super,
+};
+
+int __init qib_init_qibfs(void)
+{
+ return register_filesystem(&qibfs_fs_type);
+}
+
+int __exit qib_exit_qibfs(void)
+{
+ return unregister_filesystem(&qibfs_fs_type);
+}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
new file mode 100644
index 000000000000..1eadadc13da8
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -0,0 +1,3576 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*
+ * This file contains all of the code that is specific to the
+ * QLogic_IB 6120 PCIe chip.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <rdma/ib_verbs.h>
+
+#include "qib.h"
+#include "qib_6120_regs.h"
+
+static void qib_6120_setup_setextled(struct qib_pportdata *, u32);
+static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op);
+static u8 qib_6120_phys_portstate(u64);
+static u32 qib_6120_iblink_state(u64);
+
+/*
+ * This file contains all the chip-specific register information and
+ * access functions for the QLogic QLogic_IB PCI-Express chip.
+ *
+ */
+
+/* KREG_IDX uses machine-generated #defines */
+#define KREG_IDX(regname) (QIB_6120_##regname##_OFFS / sizeof(u64))
+
+/* Use defines to tie machine-generated names to lower-case names */
+#define kr_extctrl KREG_IDX(EXTCtrl)
+#define kr_extstatus KREG_IDX(EXTStatus)
+#define kr_gpio_clear KREG_IDX(GPIOClear)
+#define kr_gpio_mask KREG_IDX(GPIOMask)
+#define kr_gpio_out KREG_IDX(GPIOOut)
+#define kr_gpio_status KREG_IDX(GPIOStatus)
+#define kr_rcvctrl KREG_IDX(RcvCtrl)
+#define kr_sendctrl KREG_IDX(SendCtrl)
+#define kr_partitionkey KREG_IDX(RcvPartitionKey)
+#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
+#define kr_ibcstatus KREG_IDX(IBCStatus)
+#define kr_ibcctrl KREG_IDX(IBCCtrl)
+#define kr_sendbuffererror KREG_IDX(SendBufErr0)
+#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
+#define kr_counterregbase KREG_IDX(CntrRegBase)
+#define kr_palign KREG_IDX(PageAlign)
+#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
+#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
+#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
+#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
+#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
+#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
+#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
+#define kr_scratch KREG_IDX(Scratch)
+#define kr_sendctrl KREG_IDX(SendCtrl)
+#define kr_sendpioavailaddr KREG_IDX(SendPIOAvailAddr)
+#define kr_sendpiobufbase KREG_IDX(SendPIOBufBase)
+#define kr_sendpiobufcnt KREG_IDX(SendPIOBufCnt)
+#define kr_sendpiosize KREG_IDX(SendPIOSize)
+#define kr_sendregbase KREG_IDX(SendRegBase)
+#define kr_userregbase KREG_IDX(UserRegBase)
+#define kr_control KREG_IDX(Control)
+#define kr_intclear KREG_IDX(IntClear)
+#define kr_intmask KREG_IDX(IntMask)
+#define kr_intstatus KREG_IDX(IntStatus)
+#define kr_errclear KREG_IDX(ErrClear)
+#define kr_errmask KREG_IDX(ErrMask)
+#define kr_errstatus KREG_IDX(ErrStatus)
+#define kr_hwerrclear KREG_IDX(HwErrClear)
+#define kr_hwerrmask KREG_IDX(HwErrMask)
+#define kr_hwerrstatus KREG_IDX(HwErrStatus)
+#define kr_revision KREG_IDX(Revision)
+#define kr_portcnt KREG_IDX(PortCnt)
+#define kr_serdes_cfg0 KREG_IDX(SerdesCfg0)
+#define kr_serdes_cfg1 (kr_serdes_cfg0 + 1)
+#define kr_serdes_stat KREG_IDX(SerdesStat)
+#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
+
+/* These must only be written via qib_write_kreg_ctxt() */
+#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
+#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
+
+#define CREG_IDX(regname) ((QIB_6120_##regname##_OFFS - \
+ QIB_6120_LBIntCnt_OFFS) / sizeof(u64))
+
+#define cr_badformat CREG_IDX(RxBadFormatCnt)
+#define cr_erricrc CREG_IDX(RxICRCErrCnt)
+#define cr_errlink CREG_IDX(RxLinkProblemCnt)
+#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
+#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
+#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlErrCnt)
+#define cr_err_rlen CREG_IDX(RxLenErrCnt)
+#define cr_errslen CREG_IDX(TxLenErrCnt)
+#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
+#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
+#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
+#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
+#define cr_lbint CREG_IDX(LBIntCnt)
+#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
+#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
+#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
+#define cr_pktrcv CREG_IDX(RxDataPktCnt)
+#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
+#define cr_pktsend CREG_IDX(TxDataPktCnt)
+#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
+#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
+#define cr_rcvebp CREG_IDX(RxEBPCnt)
+#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
+#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
+#define cr_sendstall CREG_IDX(TxFlowStallCnt)
+#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
+#define cr_wordrcv CREG_IDX(RxDwordCnt)
+#define cr_wordsend CREG_IDX(TxDwordCnt)
+#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
+#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
+#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
+#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
+#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
+
+#define SYM_RMASK(regname, fldname) ((u64) \
+ QIB_6120_##regname##_##fldname##_RMASK)
+#define SYM_MASK(regname, fldname) ((u64) \
+ QIB_6120_##regname##_##fldname##_RMASK << \
+ QIB_6120_##regname##_##fldname##_LSB)
+#define SYM_LSB(regname, fldname) (QIB_6120_##regname##_##fldname##_LSB)
+
+#define SYM_FIELD(value, regname, fldname) ((u64) \
+ (((value) >> SYM_LSB(regname, fldname)) & \
+ SYM_RMASK(regname, fldname)))
+#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
+#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
+
+/* link training states, from IBC */
+#define IB_6120_LT_STATE_DISABLED 0x00
+#define IB_6120_LT_STATE_LINKUP 0x01
+#define IB_6120_LT_STATE_POLLACTIVE 0x02
+#define IB_6120_LT_STATE_POLLQUIET 0x03
+#define IB_6120_LT_STATE_SLEEPDELAY 0x04
+#define IB_6120_LT_STATE_SLEEPQUIET 0x05
+#define IB_6120_LT_STATE_CFGDEBOUNCE 0x08
+#define IB_6120_LT_STATE_CFGRCVFCFG 0x09
+#define IB_6120_LT_STATE_CFGWAITRMT 0x0a
+#define IB_6120_LT_STATE_CFGIDLE 0x0b
+#define IB_6120_LT_STATE_RECOVERRETRAIN 0x0c
+#define IB_6120_LT_STATE_RECOVERWAITRMT 0x0e
+#define IB_6120_LT_STATE_RECOVERIDLE 0x0f
+
+/* link state machine states from IBC */
+#define IB_6120_L_STATE_DOWN 0x0
+#define IB_6120_L_STATE_INIT 0x1
+#define IB_6120_L_STATE_ARM 0x2
+#define IB_6120_L_STATE_ACTIVE 0x3
+#define IB_6120_L_STATE_ACT_DEFER 0x4
+
+static const u8 qib_6120_physportstate[0x20] = {
+ [IB_6120_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
+ [IB_6120_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
+ [IB_6120_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
+ [IB_6120_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
+ [IB_6120_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_6120_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_6120_LT_STATE_CFGDEBOUNCE] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_6120_LT_STATE_CFGRCVFCFG] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_6120_LT_STATE_CFGWAITRMT] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_6120_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_6120_LT_STATE_RECOVERRETRAIN] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_6120_LT_STATE_RECOVERWAITRMT] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_6120_LT_STATE_RECOVERIDLE] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
+};
+
+
+struct qib_chip_specific {
+ u64 __iomem *cregbase;
+ u64 *cntrs;
+ u64 *portcntrs;
+ void *dummy_hdrq; /* used after ctxt close */
+ dma_addr_t dummy_hdrq_phys;
+ spinlock_t kernel_tid_lock; /* no back to back kernel TID writes */
+ spinlock_t user_tid_lock; /* no back to back user TID writes */
+ spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
+ spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
+ u64 hwerrmask;
+ u64 errormask;
+ u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
+ u64 gpio_mask; /* shadow the gpio mask register */
+ u64 extctrl; /* shadow the gpio output enable, etc... */
+ /*
+ * these 5 fields are used to establish deltas for IB symbol
+ * errors and linkrecovery errors. They can be reported on
+ * some chips during link negotiation prior to INIT, and with
+ * DDR when faking DDR negotiations with non-IBTA switches.
+ * The chip counters are adjusted at driver unload if there is
+ * a non-zero delta.
+ */
+ u64 ibdeltainprog;
+ u64 ibsymdelta;
+ u64 ibsymsnap;
+ u64 iblnkerrdelta;
+ u64 iblnkerrsnap;
+ u64 ibcctrl; /* shadow for kr_ibcctrl */
+ u32 lastlinkrecov; /* link recovery issue */
+ int irq;
+ u32 cntrnamelen;
+ u32 portcntrnamelen;
+ u32 ncntrs;
+ u32 nportcntrs;
+ /* used with gpio interrupts to implement IB counters */
+ u32 rxfc_unsupvl_errs;
+ u32 overrun_thresh_errs;
+ /*
+ * these count only cases where _successive_ LocalLinkIntegrity
+ * errors were seen in the receive headers of IB standard packets
+ */
+ u32 lli_errs;
+ u32 lli_counter;
+ u64 lli_thresh;
+ u64 sword; /* total dwords sent (sample result) */
+ u64 rword; /* total dwords received (sample result) */
+ u64 spkts; /* total packets sent (sample result) */
+ u64 rpkts; /* total packets received (sample result) */
+ u64 xmit_wait; /* # of ticks no data sent (sample result) */
+ struct timer_list pma_timer;
+ char emsgbuf[128];
+ char bitsmsgbuf[64];
+ u8 pma_sample_status;
+};
+
+/* ibcctrl bits */
+#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
+/* cycle through TS1/TS2 till OK */
+#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
+/* wait for TS1, then go on */
+#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
+#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
+
+#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
+#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
+#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
+#define QLOGIC_IB_IBCC_LINKCMD_SHIFT 18
+
+/*
+ * We could have a single register get/put routine, that takes a group type,
+ * but this is somewhat clearer and cleaner. It also gives us some error
+ * checking. 64 bit register reads should always work, but are inefficient
+ * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
+ * so we use kreg32 wherever possible. User register and counter register
+ * reads are always 32 bit reads, so only one form of those routines.
+ */
+
+/**
+ * qib_read_ureg32 - read 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @ctxt: context number
+ *
+ * Return the contents of a register that is virtualized to be per context.
+ * Returns 0 if the chip is not present (not distinguishable from valid
+ * contents at runtime; we may add a separate error variable at some point).
+ */
+static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
+ enum qib_ureg regno, int ctxt)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+
+ if (dd->userbase)
+ return readl(regno + (u64 __iomem *)
+ ((char __iomem *)dd->userbase +
+ dd->ureg_align * ctxt));
+ else
+ return readl(regno + (u64 __iomem *)
+ (dd->uregbase +
+ (char __iomem *)dd->kregbase +
+ dd->ureg_align * ctxt));
+}
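+
+/*
+ * User registers live either in a separately mapped region (dd->userbase)
+ * or at offset dd->uregbase within the kernel register mapping; either way,
+ * each context's copy is dd->ureg_align bytes from the previous one.
+ */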
+
+/**
+ * qib_write_ureg - write 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @value: value
+ * @ctxt: context
+ *
+ * Write the contents of a register that is virtualized to be per context.
+ */
+static inline void qib_write_ureg(const struct qib_devdata *dd,
+ enum qib_ureg regno, u64 value, int ctxt)
+{
+ u64 __iomem *ubase;
+ if (dd->userbase)
+ ubase = (u64 __iomem *)
+ ((char __iomem *) dd->userbase +
+ dd->ureg_align * ctxt);
+ else
+ ubase = (u64 __iomem *)
+ (dd->uregbase +
+ (char __iomem *) dd->kregbase +
+ dd->ureg_align * ctxt);
+
+ if (dd->kregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &ubase[regno]);
+}
+
+static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
+ const u16 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+ return readl((u32 __iomem *)&dd->kregbase[regno]);
+}
+
+static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
+ const u16 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+
+ return readq(&dd->kregbase[regno]);
+}
+
+static inline void qib_write_kreg(const struct qib_devdata *dd,
+ const u16 regno, u64 value)
+{
+ if (dd->kregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &dd->kregbase[regno]);
+}
+
+/**
+ * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
+ * @dd: the qlogic_ib device
+ * @regno: the register number to write
+ * @ctxt: the context containing the register
+ * @value: the value to write
+ */
+static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
+ const u16 regno, unsigned ctxt,
+ u64 value)
+{
+ qib_write_kreg(dd, regno + ctxt, value);
+}
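+
+/*
+ * The per-context kernel registers (kr_rcvhdraddr, kr_rcvhdrtailaddr) are
+ * laid out as consecutive 64-bit registers, one per context, hence the
+ * simple regno + ctxt arithmetic above.
+ */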
+
+static inline void write_6120_creg(const struct qib_devdata *dd,
+ u16 regno, u64 value)
+{
+ if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &dd->cspec->cregbase[regno]);
+}
+
+static inline u64 read_6120_creg(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readq(&dd->cspec->cregbase[regno]);
+}
+
+static inline u32 read_6120_creg32(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readl(&dd->cspec->cregbase[regno]);
+}
+
+/* kr_control bits */
+#define QLOGIC_IB_C_RESET 1U
+
+/* kr_intstatus, kr_intclear, kr_intmask bits */
+#define QLOGIC_IB_I_RCVURG_MASK ((1U << 5) - 1)
+#define QLOGIC_IB_I_RCVURG_SHIFT 0
+#define QLOGIC_IB_I_RCVAVAIL_MASK ((1U << 5) - 1)
+#define QLOGIC_IB_I_RCVAVAIL_SHIFT 12
+
+#define QLOGIC_IB_C_FREEZEMODE 0x00000002
+#define QLOGIC_IB_C_LINKENABLE 0x00000004
+#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
+#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
+#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL
+#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL
+#define QLOGIC_IB_I_BITSEXTANT \
+ ((QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
+ (QLOGIC_IB_I_RCVAVAIL_MASK << \
+ QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
+ QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
+ QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO)
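+
+/*
+ * QLOGIC_IB_I_BITSEXTANT is the union of every interrupt status bit this
+ * chip can legitimately set; unlikely_6120_intr() warns if anything
+ * outside it shows up in kr_intstatus.
+ */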
+
+/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
+#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
+#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
+#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
+#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
+#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
+#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
+#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
+#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
+#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
+
+
+/* kr_extstatus bits */
+#define QLOGIC_IB_EXTS_FREQSEL 0x2
+#define QLOGIC_IB_EXTS_SERDESSEL 0x4
+#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
+#define QLOGIC_IB_EXTS_MEMBIST_FOUND 0x0000000000008000
+
+/* kr_xgxsconfig bits */
+#define QLOGIC_IB_XGXS_RESET 0x5ULL
+
+#define _QIB_GPIO_SDA_NUM 1
+#define _QIB_GPIO_SCL_NUM 0
+
+/* Bits in GPIO for the added IB link interrupts */
+#define GPIO_RXUVL_BIT 3
+#define GPIO_OVRUN_BIT 4
+#define GPIO_LLI_BIT 5
+#define GPIO_ERRINTR_MASK 0x38
+
+
+#define QLOGIC_IB_RT_BUFSIZE_MASK 0xe0000000ULL
+#define QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid) \
+ ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
+#define QLOGIC_IB_RT_BUFSIZE(tid) (1 << QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid))
+#define QLOGIC_IB_RT_IS_VALID(tid) \
+ (((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) && \
+ ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) != QLOGIC_IB_RT_BUFSIZE_MASK)))
+#define QLOGIC_IB_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
+#define QLOGIC_IB_RT_ADDR_SHIFT 10
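+
+/*
+ * A receive TID entry packs a buffer-size code into bits 31:29 (a value n
+ * means a 1 << (n + 10) byte buffer, so 1 -> 2KB and 2 -> 4KB, matching
+ * qib_6120_tidtemplate() below) and an address field in the low 29 bits
+ * (QLOGIC_IB_RT_ADDR_MASK). A TID is valid only when the size field is
+ * neither 0 nor all ones.
+ */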
+
+#define QLOGIC_IB_R_INTRAVAIL_SHIFT 16
+#define QLOGIC_IB_R_TAILUPD_SHIFT 31
+#define IBA6120_R_PKEY_DIS_SHIFT 30
+
+#define PBC_6120_VL15_SEND_CTRL (1ULL << 31) /* pbc; VL15; link_buf only */
+
+#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
+#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
+
+#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
+ ((1ULL << (SYM_LSB(regname, fldname) + (bit)))))
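+
+/*
+ * SYM_MASK_BIT() selects a single bit within a multi-bit field; it is used
+ * below to name the individual TXE/RXE memory parity error bits.
+ */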
+
+#define TXEMEMPARITYERR_PIOBUF \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
+#define TXEMEMPARITYERR_PIOPBC \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
+#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
+
+#define RXEMEMPARITYERR_RCVBUF \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
+#define RXEMEMPARITYERR_LOOKUPQ \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
+#define RXEMEMPARITYERR_EXPTID \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
+#define RXEMEMPARITYERR_EAGERTID \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
+#define RXEMEMPARITYERR_FLAGBUF \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
+#define RXEMEMPARITYERR_DATAINFO \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
+#define RXEMEMPARITYERR_HDRINFO \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
+
+/* 6120 specific hardware errors... */
+static const struct qib_hwerror_msgs qib_6120_hwerror_msgs[] = {
+ /* generic hardware errors */
+ QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
+ QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
+
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
+ "TXE PIOBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
+ "TXE PIOPBC Memory Parity"),
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
+ "TXE PIOLAUNCHFIFO Memory Parity"),
+
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
+ "RXE RCVBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
+ "RXE LOOKUPQ Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
+ "RXE EAGERTID Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
+ "RXE EXPTID Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
+ "RXE FLAGBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
+ "RXE DATAINFO Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
+ "RXE HDRINFO Memory Parity"),
+
+ /* chip-specific hardware errors */
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
+ "PCIe Poisoned TLP"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
+ "PCIe completion timeout"),
+ /*
+ * In practice, it's unlikely that we'll see PCIe PLL, or bus
+ * parity or memory parity error failures, because most likely we
+ * won't be able to talk to the core of the chip. Nonetheless, we
+ * might see them, if they are in parts of the PCIe core that aren't
+ * essential.
+ */
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
+ "PCIePLL1"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
+ "PCIePLL0"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
+ "PCIe XTLH core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
+ "PCIe ADM TX core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
+ "PCIe ADM RX core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
+ "SerDes PLL"),
+};
+
+#define TXE_PIO_PARITY (TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC)
+#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
+ QLOGIC_IB_HWE_COREPLL_RFSLIP)
+
+ /* masks for sanity checking interrupt and error bits */
+#define IB_HWE_BITSEXTANT \
+ (HWE_MASK(RXEMemParityErr) | \
+ HWE_MASK(TXEMemParityErr) | \
+ (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
+ QLOGIC_IB_HWE_PCIE1PLLFAILED | \
+ QLOGIC_IB_HWE_PCIE0PLLFAILED | \
+ QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
+ QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
+ HWE_MASK(PowerOnBISTFailed) | \
+ QLOGIC_IB_HWE_COREPLL_FBSLIP | \
+ QLOGIC_IB_HWE_COREPLL_RFSLIP | \
+ QLOGIC_IB_HWE_SERDESPLLFAILED | \
+ HWE_MASK(IBCBusToSPCParityErr) | \
+ HWE_MASK(IBCBusFromSPCParityErr))
+
+#define IB_E_BITSEXTANT \
+ (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
+ ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
+ ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
+ ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
+ ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
+ ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
+ ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
+ ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
+ ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendMaxPktLenErr) | \
+ ERR_MASK(SendUnderRunErr) | ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(SendPioArmLaunchErr) | \
+ ERR_MASK(SendUnexpectedPktNumErr) | \
+ ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(IBStatusChanged) | \
+ ERR_MASK(InvalidAddrErr) | ERR_MASK(ResetNegated) | \
+ ERR_MASK(HardwareErr))
+
+#define QLOGIC_IB_E_PKTERRS ( \
+ ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(RcvVCRCErr) | \
+ ERR_MASK(RcvICRCErr) | \
+ ERR_MASK(RcvShortPktLenErr) | \
+ ERR_MASK(RcvEBPErr))
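+
+/*
+ * QLOGIC_IB_E_PKTERRS are the "normal" packet errors: if only these bits
+ * are set, qib_decode_6120_err() reports iserr == 0 so the caller can log
+ * them less prominently.
+ */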
+
+/* These are all rcv-related errors which we want to count for stats */
+#define E_SUM_PKTERRS \
+ (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
+ ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
+ ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
+ ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
+ ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
+
+/* These are all send-related errors which we want to count for stats */
+#define E_SUM_ERRS \
+ (ERR_MASK(SendPioArmLaunchErr) | \
+ ERR_MASK(SendUnexpectedPktNumErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
+ ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(InvalidAddrErr))
+
+/*
+ * This is similar to E_SUM_ERRS, but we can't ignore armlaunch errors,
+ * and we don't ignore errors unrelated to freeze and cancelling buffers.
+ * Armlaunch can't be ignored because more can arrive while we are still
+ * cleaning up, and those need to be cancelled as they happen.
+ */
+#define E_SPKT_ERRS_IGNORE \
+ (ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
+ ERR_MASK(SendPktLenErr))
+
+/*
+ * these are errors that can occur when the link changes state while
+ * a packet is being sent or received. This doesn't cover things
+ * like EBP or VCRC that can result from the sender's link changing
+ * state mid-packet, so we receive a "known bad" packet.
+ */
+#define E_SUM_LINK_PKTERRS \
+ (ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvUnexpectedCharErr))
+
+static void qib_6120_put_tid_2(struct qib_devdata *, u64 __iomem *,
+ u32, unsigned long);
+
+/*
+ * On platforms using this chip, and not having ordered WC stores, we
+ * can get TXE parity errors due to speculative reads to the PIO buffers,
+ * and this, due to a chip issue, can result in (many) false parity error
+ * reports. So it's a debug print on those, and an info print on systems
+ * where the speculative reads don't occur.
+ */
+static void qib_6120_txe_recover(struct qib_devdata *dd)
+{
+ if (!qib_unordered_wc())
+ qib_devinfo(dd->pcidev,
+ "Recovering from TXE PIO parity error\n");
+}
+
+/* enable/disable chip from delivering interrupts */
+static void qib_6120_set_intr_state(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ if (dd->flags & QIB_BADINTR)
+ return;
+ qib_write_kreg(dd, kr_intmask, ~0ULL);
+ /* force re-interrupt of any pending interrupts. */
+ qib_write_kreg(dd, kr_intclear, 0ULL);
+ } else
+ qib_write_kreg(dd, kr_intmask, 0ULL);
+}
+
+/*
+ * Try to cleanup as much as possible for anything that might have gone
+ * wrong while in freeze mode, such as pio buffers being written by user
+ * processes (causing armlaunch), send errors due to going into freeze mode,
+ * etc., and try to avoid causing extra interrupts while doing so.
+ * Forcibly update the in-memory pioavail register copies after cleanup
+ * because the chip won't do it while in freeze mode (the register values
+ * themselves are kept correct).
+ * Make sure that we don't lose any important interrupts by using the chip
+ * feature that says that writing 0 to a bit in *clear that is set in
+ * *status will cause an interrupt to be generated again (if allowed by
+ * the *mask value).
+ * This is in chip-specific code because of all of the register accesses,
+ * even though the details are similar on most chips
+ */
+static void qib_6120_clear_freeze(struct qib_devdata *dd)
+{
+ /* disable error interrupts, to avoid confusion */
+ qib_write_kreg(dd, kr_errmask, 0ULL);
+
+ /* also disable interrupts; errormask is sometimes overwritten */
+ qib_6120_set_intr_state(dd, 0);
+
+ qib_cancel_sends(dd->pport);
+
+ /* clear the freeze, and be sure chip saw it */
+ qib_write_kreg(dd, kr_control, dd->control);
+ qib_read_kreg32(dd, kr_scratch);
+
+ /* force in-memory update now we are out of freeze */
+ qib_force_pio_avail_update(dd);
+
+ /*
+ * force new interrupt if any hwerr, error or interrupt bits are
+ * still set, and clear "safe" send packet errors related to freeze
+ * and cancelling sends. Re-enable error interrupts before possible
+ * force of re-interrupt on pending interrupts.
+ */
+ qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+ qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+ qib_6120_set_intr_state(dd, 1);
+}
+
+/**
+ * qib_handle_6120_hwerrors - display hardware errors.
+ * @dd: the qlogic_ib device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Most hardware errors are catastrophic, but for right now,
+ * we'll print them and continue. Reuse the same message buffer as
+ * handle_6120_errors() to avoid excessive stack usage.
+ */
+static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
+ size_t msgl)
+{
+ u64 hwerrs;
+ u32 bits, ctrl;
+ int isfatal = 0;
+ char *bitsmsg;
+ int log_idx;
+
+ hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+ if (!hwerrs)
+ return;
+ if (hwerrs == ~0ULL) {
+ qib_dev_err(dd, "Read of hardware error status failed "
+ "(all bits set); ignoring\n");
+ return;
+ }
+ qib_stats.sps_hwerrs++;
+
+ /* Always clear the error status register, except MEMBISTFAIL,
+ * regardless of whether we continue or stop using the chip.
+ * We want that set so we know it failed, even across driver reload.
+ * We'll still ignore it in the hwerrmask. We do this partly for
+ * diagnostics, but also for support */
+ qib_write_kreg(dd, kr_hwerrclear,
+ hwerrs & ~HWE_MASK(PowerOnBISTFailed));
+
+ hwerrs &= dd->cspec->hwerrmask;
+
+ /* We log some errors to EEPROM, check if we have any of those. */
+ for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+ if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
+ qib_inc_eeprom_err(dd, log_idx, 1);
+
+ /*
+ * Make sure we get this much out, unless told to be quiet,
+ * or it's occurred within the last 5 seconds.
+ */
+ if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID))
+ qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
+ "(cleared)\n", (unsigned long long) hwerrs);
+
+ if (hwerrs & ~IB_HWE_BITSEXTANT)
+ qib_dev_err(dd, "hwerror interrupt with unknown errors "
+ "%llx set\n", (unsigned long long)
+ (hwerrs & ~IB_HWE_BITSEXTANT));
+
+ ctrl = qib_read_kreg32(dd, kr_control);
+ if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
+ /*
+ * Parity errors in send memory are recoverable,
+ * just cancel the send (if indicated in sendbuffererror),
+ * count the occurrence, unfreeze (if no other handled
+ * hardware error bits are set), and continue. They can
+ * occur if a processor speculative read is done to the PIO
+ * buffer while we are sending a packet, for example.
+ */
+ if (hwerrs & TXE_PIO_PARITY) {
+ qib_6120_txe_recover(dd);
+ hwerrs &= ~TXE_PIO_PARITY;
+ }
+
+ if (!hwerrs) {
+ static u32 freeze_cnt;
+
+ freeze_cnt++;
+ qib_6120_clear_freeze(dd);
+ } else
+ isfatal = 1;
+ }
+
+ *msg = '\0';
+
+ if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
+ isfatal = 1;
+ strlcat(msg, "[Memory BIST test failed, InfiniPath hardware"
+ " unusable]", msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ qib_format_hwerrors(hwerrs, qib_6120_hwerror_msgs,
+ ARRAY_SIZE(qib_6120_hwerror_msgs), msg, msgl);
+
+ bitsmsg = dd->cspec->bitsmsgbuf;
+ if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
+ bits = (u32) ((hwerrs >>
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
+ snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+ "[PCIe Mem Parity Errs %x] ", bits);
+ strlcat(msg, bitsmsg, msgl);
+ }
+
+ if (hwerrs & _QIB_PLL_FAIL) {
+ isfatal = 1;
+ snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+ "[PLL failed (%llx), InfiniPath hardware unusable]",
+ (unsigned long long) hwerrs & _QIB_PLL_FAIL);
+ strlcat(msg, bitsmsg, msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
+ /*
+ * If it occurs, it is left masked since the external
+ * interface is unused
+ */
+ dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ if (hwerrs)
+ /*
+ * if any set that we aren't ignoring; only
+ * make the complaint once, in case it's stuck
+ * or recurring, and we get here multiple
+ * times.
+ */
+ qib_dev_err(dd, "%s hardware error\n", msg);
+ else
+ *msg = 0; /* recovered from all of them */
+
+ if (isfatal && !dd->diag_client) {
+ qib_dev_err(dd, "Fatal Hardware Error, no longer"
+ " usable, SN %.16s\n", dd->serial);
+ /*
+ * for /sys status file and user programs to print; if no
+ * trailing brace is copied, we'll know it was truncated.
+ */
+ if (dd->freezemsg)
+ snprintf(dd->freezemsg, dd->freezelen,
+ "{%s}", msg);
+ qib_disable_after_error(dd);
+ }
+}
+
+/*
+ * Decode the error status into strings, deciding whether to always
+ * print it or not depending on "normal packet errors" vs everything
+ * else. Return 1 if "real" errors, otherwise 0 if only packet
+ * errors, so caller can decide what to print with the string.
+ */
+static int qib_decode_6120_err(struct qib_devdata *dd, char *buf, size_t blen,
+ u64 err)
+{
+ int iserr = 1;
+
+ *buf = '\0';
+ if (err & QLOGIC_IB_E_PKTERRS) {
+ if (!(err & ~QLOGIC_IB_E_PKTERRS))
+ iserr = 0;
+ if ((err & ERR_MASK(RcvICRCErr)) &&
+ !(err&(ERR_MASK(RcvVCRCErr)|ERR_MASK(RcvEBPErr))))
+ strlcat(buf, "CRC ", blen);
+ if (!iserr)
+ goto done;
+ }
+ if (err & ERR_MASK(RcvHdrLenErr))
+ strlcat(buf, "rhdrlen ", blen);
+ if (err & ERR_MASK(RcvBadTidErr))
+ strlcat(buf, "rbadtid ", blen);
+ if (err & ERR_MASK(RcvBadVersionErr))
+ strlcat(buf, "rbadversion ", blen);
+ if (err & ERR_MASK(RcvHdrErr))
+ strlcat(buf, "rhdr ", blen);
+ if (err & ERR_MASK(RcvLongPktLenErr))
+ strlcat(buf, "rlongpktlen ", blen);
+ if (err & ERR_MASK(RcvMaxPktLenErr))
+ strlcat(buf, "rmaxpktlen ", blen);
+ if (err & ERR_MASK(RcvMinPktLenErr))
+ strlcat(buf, "rminpktlen ", blen);
+ if (err & ERR_MASK(SendMinPktLenErr))
+ strlcat(buf, "sminpktlen ", blen);
+ if (err & ERR_MASK(RcvFormatErr))
+ strlcat(buf, "rformaterr ", blen);
+ if (err & ERR_MASK(RcvUnsupportedVLErr))
+ strlcat(buf, "runsupvl ", blen);
+ if (err & ERR_MASK(RcvUnexpectedCharErr))
+ strlcat(buf, "runexpchar ", blen);
+ if (err & ERR_MASK(RcvIBFlowErr))
+ strlcat(buf, "ribflow ", blen);
+ if (err & ERR_MASK(SendUnderRunErr))
+ strlcat(buf, "sunderrun ", blen);
+ if (err & ERR_MASK(SendPioArmLaunchErr))
+ strlcat(buf, "spioarmlaunch ", blen);
+ if (err & ERR_MASK(SendUnexpectedPktNumErr))
+ strlcat(buf, "sunexperrpktnum ", blen);
+ if (err & ERR_MASK(SendDroppedSmpPktErr))
+ strlcat(buf, "sdroppedsmppkt ", blen);
+ if (err & ERR_MASK(SendMaxPktLenErr))
+ strlcat(buf, "smaxpktlen ", blen);
+ if (err & ERR_MASK(SendUnsupportedVLErr))
+ strlcat(buf, "sunsupVL ", blen);
+ if (err & ERR_MASK(InvalidAddrErr))
+ strlcat(buf, "invalidaddr ", blen);
+ if (err & ERR_MASK(RcvEgrFullErr))
+ strlcat(buf, "rcvegrfull ", blen);
+ if (err & ERR_MASK(RcvHdrFullErr))
+ strlcat(buf, "rcvhdrfull ", blen);
+ if (err & ERR_MASK(IBStatusChanged))
+ strlcat(buf, "ibcstatuschg ", blen);
+ if (err & ERR_MASK(RcvIBLostLinkErr))
+ strlcat(buf, "riblostlink ", blen);
+ if (err & ERR_MASK(HardwareErr))
+ strlcat(buf, "hardware ", blen);
+ if (err & ERR_MASK(ResetNegated))
+ strlcat(buf, "reset ", blen);
+done:
+ return iserr;
+}
+
+/*
+ * Called when we might have an error that is specific to a particular
+ * PIO buffer, and may need to cancel that buffer, so it can be re-used.
+ */
+static void qib_disarm_6120_senderrbufs(struct qib_pportdata *ppd)
+{
+ unsigned long sbuf[2];
+ struct qib_devdata *dd = ppd->dd;
+
+ /*
+ * It's possible that sendbuffererror could have bits set; might
+ * have already done this as a result of hardware error handling.
+ */
+ sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
+ sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
+
+ if (sbuf[0] || sbuf[1])
+ qib_disarm_piobufs_set(dd, sbuf,
+ dd->piobcnt2k + dd->piobcnt4k);
+}
+
+static int chk_6120_linkrecovery(struct qib_devdata *dd, u64 ibcs)
+{
+ int ret = 1;
+ u32 ibstate = qib_6120_iblink_state(ibcs);
+ u32 linkrecov = read_6120_creg32(dd, cr_iblinkerrrecov);
+
+ if (linkrecov != dd->cspec->lastlinkrecov) {
+ /* and no more until active again */
+ dd->cspec->lastlinkrecov = 0;
+ qib_set_linkstate(dd->pport, QIB_IB_LINKDOWN);
+ ret = 0;
+ }
+ if (ibstate == IB_PORT_ACTIVE)
+ dd->cspec->lastlinkrecov =
+ read_6120_creg32(dd, cr_iblinkerrrecov);
+ return ret;
+}
+
+static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
+{
+ char *msg;
+ u64 ignore_this_time = 0;
+ u64 iserr = 0;
+ int log_idx;
+ struct qib_pportdata *ppd = dd->pport;
+ u64 mask;
+
+ /* don't report errors that are masked */
+ errs &= dd->cspec->errormask;
+ msg = dd->cspec->emsgbuf;
+
+ /* do these first, they are most important */
+ if (errs & ERR_MASK(HardwareErr))
+ qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+ else
+ for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+ if (errs & dd->eep_st_masks[log_idx].errs_to_log)
+ qib_inc_eeprom_err(dd, log_idx, 1);
+
+ if (errs & ~IB_E_BITSEXTANT)
+ qib_dev_err(dd, "error interrupt with unknown errors "
+ "%llx set\n",
+ (unsigned long long) (errs & ~IB_E_BITSEXTANT));
+
+ if (errs & E_SUM_ERRS) {
+ qib_disarm_6120_senderrbufs(ppd);
+ if ((errs & E_SUM_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when trying to bring the link
+ * up, but the IB link changes state at the "wrong"
+ * time. The IB logic then complains that the packet
+ * isn't valid. We don't want to confuse people, so
+ * we just don't print them, except at debug
+ */
+ ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+ }
+ } else if ((errs & E_SUM_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when SMA is trying to bring the link
+ * up, but the IB link changes state at the "wrong" time.
+ * The IB logic then complains that the packet isn't
+ * valid. We don't want to confuse people, so we just
+ * don't print them, except at debug
+ */
+ ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+ }
+
+ qib_write_kreg(dd, kr_errclear, errs);
+
+ errs &= ~ignore_this_time;
+ if (!errs)
+ goto done;
+
+ /*
+ * The ones we mask off are handled specially below
+ * or above.
+ */
+ mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
+ ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
+ qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+
+ if (errs & E_SUM_PKTERRS)
+ qib_stats.sps_rcverrs++;
+ if (errs & E_SUM_ERRS)
+ qib_stats.sps_txerrs++;
+
+ iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS);
+
+ if (errs & ERR_MASK(IBStatusChanged)) {
+ u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
+ u32 ibstate = qib_6120_iblink_state(ibcs);
+ int handle = 1;
+
+ if (ibstate != IB_PORT_INIT && dd->cspec->lastlinkrecov)
+ handle = chk_6120_linkrecovery(dd, ibcs);
+ /*
+ * Since going into a recovery state causes the link state
+ * to go down and since recovery is transitory, it is better
+ * if we "miss" ever seeing the link training state go into
+ * recovery (i.e., ignore this transition for link state
+ * special handling purposes) without updating lastibcstat.
+ */
+ if (handle && qib_6120_phys_portstate(ibcs) ==
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
+ handle = 0;
+ if (handle)
+ qib_handle_e_ibstatuschanged(ppd, ibcs);
+ }
+
+ if (errs & ERR_MASK(ResetNegated)) {
+ qib_dev_err(dd, "Got reset, requires re-init "
+ "(unload and reload driver)\n");
+ dd->flags &= ~QIB_INITTED; /* needs re-init */
+ /* mark as having had error */
+ *dd->devstatusp |= QIB_STATUS_HWERROR;
+ *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
+ }
+
+ if (*msg && iserr)
+ qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
+
+ if (ppd->state_wanted & ppd->lflags)
+ wake_up_interruptible(&ppd->state_wait);
+
+ /*
+ * If there were hdrq or egrfull errors, wake up any processes
+ * waiting in poll. We used to try to check which contexts had
+ * the overflow, but given the cost of that and the chip reads
+ * to support it, it's better to just wake everybody up if we
+ * get an overflow; waiters can poll again if it's not them.
+ */
+ if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
+ qib_handle_urcv(dd, ~0U);
+ if (errs & ERR_MASK(RcvEgrFullErr))
+ qib_stats.sps_buffull++;
+ else
+ qib_stats.sps_hdrfull++;
+ }
+done:
+ return;
+}
+
+/**
+ * qib_6120_init_hwerrors - enable hardware errors
+ * @dd: the qlogic_ib device
+ *
+ * now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask
+ */
+static void qib_6120_init_hwerrors(struct qib_devdata *dd)
+{
+ u64 val;
+ u64 extsval;
+
+ extsval = qib_read_kreg64(dd, kr_extstatus);
+
+ if (!(extsval & QLOGIC_IB_EXTS_MEMBIST_ENDTEST))
+ qib_dev_err(dd, "MemBIST did not complete!\n");
+
+ /* init so all hwerrors interrupt, and enter freeze, adjust below */
+ val = ~0ULL;
+ if (dd->minrev < 2) {
+ /*
+ * Avoid problem with internal interface bus parity
+ * checking. Fixed in Rev2.
+ */
+ val &= ~QLOGIC_IB_HWE_PCIEBUSPARITYRADM;
+ }
+ /* avoid a freeze-mode issue from speculative reads on some Intel CPUs */
+ val &= ~TXEMEMPARITYERR_PIOBUF;
+
+ dd->cspec->hwerrmask = val;
+
+ qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+
+ /* clear all */
+ qib_write_kreg(dd, kr_errclear, ~0ULL);
+ /* enable errors that are masked, at least this first time. */
+ qib_write_kreg(dd, kr_errmask, ~0ULL);
+ dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
+ /* clear any interrupts up to this point (ints still not enabled) */
+ qib_write_kreg(dd, kr_intclear, ~0ULL);
+
+ qib_write_kreg(dd, kr_rcvbthqp,
+ dd->qpn_mask << (QIB_6120_RcvBTHQP_BTHQP_Mask_LSB - 1) |
+ QIB_KD_QP);
+}
+
+/*
+ * Disable and enable the armlaunch error. Used for PIO bandwidth testing
+ * on chips that are count-based, rather than trigger-based. There is no
+ * reference counting, but that's also fine, given the intended use.
+ * Only chip-specific because it's all register accesses
+ */
+static void qib_set_6120_armlaunch(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ qib_write_kreg(dd, kr_errclear,
+ ERR_MASK(SendPioArmLaunchErr));
+ dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
+ } else
+ dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
+/*
+ * Formerly took parameter <which> in pre-shifted,
+ * pre-merged form with LinkCmd and LinkInitCmd
+ * together, with zero meaning NOP.
+ */
+static void qib_set_ib_6120_lstate(struct qib_pportdata *ppd, u16 linkcmd,
+ u16 linitcmd)
+{
+ u64 mod_wd;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
+ /*
+ * If we are told to disable, note that so link-recovery
+ * code does not attempt to bring us back up.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
+ /*
+ * Any other linkinitcmd will lead to LINKDOWN and then
+ * to INIT (if all is well), so clear flag to let
+ * link-recovery code attempt to bring us back up.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
+
+ mod_wd = (linkcmd << QLOGIC_IB_IBCC_LINKCMD_SHIFT) |
+ (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+ qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl | mod_wd);
+ /* write to chip to prevent back-to-back writes of control reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+}
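+
+/*
+ * Example: qib_6120_quiet_serdes() below passes linkcmd == 0 and
+ * linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE to disable the link.
+ */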
+
+/**
+ * qib_6120_bringup_serdes - bring up the serdes
+ * @ppd: the port of the qlogic_ib device
+ */
+static int qib_6120_bringup_serdes(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 val, config1, prev_val, hwstat, ibc;
+
+ /* Put IBC in reset, sends disabled */
+ dd->control &= ~QLOGIC_IB_C_LINKENABLE;
+ qib_write_kreg(dd, kr_control, 0ULL);
+
+ dd->cspec->ibdeltainprog = 1;
+ dd->cspec->ibsymsnap = read_6120_creg32(dd, cr_ibsymbolerr);
+ dd->cspec->iblnkerrsnap = read_6120_creg32(dd, cr_iblinkerrrecov);
+
+ /* flowcontrolwatermark is in units of KBytes */
+ ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
+ /*
+ * How often flowctrl sent. More or less in usecs; balance against
+ * watermark value, so that in theory senders always get a flow
+ * control update in time to not let the IB link go idle.
+ */
+ ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
+ /* max error tolerance */
+ dd->cspec->lli_thresh = 0xf;
+ ibc |= (u64) dd->cspec->lli_thresh << SYM_LSB(IBCCtrl, PhyerrThreshold);
+ /* use "real" buffer space for IB credit flow control */
+ ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
+ ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
+ /*
+ * set initial max size pkt IBC will send, including ICRC; it's the
+ * PIO buffer size in dwords, less 1; also see qib_set_mtu()
+ */
+ ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
+ dd->cspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
+
+ /* initially come up waiting for TS1, without sending anything. */
+ val = dd->cspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+ QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+ qib_write_kreg(dd, kr_ibcctrl, val);
+
+ val = qib_read_kreg64(dd, kr_serdes_cfg0);
+ config1 = qib_read_kreg64(dd, kr_serdes_cfg1);
+
+ /*
+ * Force reset on, also set rxdetect enable. Must do before reading
+ * serdesstatus at least for simulation, or some of the bits in
+ * serdes status will come back as undefined and cause simulation
+ * failures
+ */
+ val |= SYM_MASK(SerdesCfg0, ResetPLL) |
+ SYM_MASK(SerdesCfg0, RxDetEnX) |
+ (SYM_MASK(SerdesCfg0, L1PwrDnA) |
+ SYM_MASK(SerdesCfg0, L1PwrDnB) |
+ SYM_MASK(SerdesCfg0, L1PwrDnC) |
+ SYM_MASK(SerdesCfg0, L1PwrDnD));
+ qib_write_kreg(dd, kr_serdes_cfg0, val);
+ /* be sure chip saw it */
+ qib_read_kreg64(dd, kr_scratch);
+ udelay(5); /* need pll reset set at least for a bit */
+ /*
+ * after PLL is reset, set the per-lane Resets and TxIdle and
+ * clear the PLL reset and rxdetect (to get falling edge).
+ * Leave L1PWR bits set (permanently)
+ */
+ val &= ~(SYM_MASK(SerdesCfg0, RxDetEnX) |
+ SYM_MASK(SerdesCfg0, ResetPLL) |
+ (SYM_MASK(SerdesCfg0, L1PwrDnA) |
+ SYM_MASK(SerdesCfg0, L1PwrDnB) |
+ SYM_MASK(SerdesCfg0, L1PwrDnC) |
+ SYM_MASK(SerdesCfg0, L1PwrDnD)));
+ val |= (SYM_MASK(SerdesCfg0, ResetA) |
+ SYM_MASK(SerdesCfg0, ResetB) |
+ SYM_MASK(SerdesCfg0, ResetC) |
+ SYM_MASK(SerdesCfg0, ResetD)) |
+ SYM_MASK(SerdesCfg0, TxIdeEnX);
+ qib_write_kreg(dd, kr_serdes_cfg0, val);
+ /* be sure chip saw it */
+ (void) qib_read_kreg64(dd, kr_scratch);
+ /* need PLL reset clear for at least 11 usec before lane
+ * resets cleared; give it a few more to be sure */
+ udelay(15);
+ val &= ~((SYM_MASK(SerdesCfg0, ResetA) |
+ SYM_MASK(SerdesCfg0, ResetB) |
+ SYM_MASK(SerdesCfg0, ResetC) |
+ SYM_MASK(SerdesCfg0, ResetD)) |
+ SYM_MASK(SerdesCfg0, TxIdeEnX));
+
+ qib_write_kreg(dd, kr_serdes_cfg0, val);
+ /* be sure chip saw it */
+ (void) qib_read_kreg64(dd, kr_scratch);
+
+ val = qib_read_kreg64(dd, kr_xgxs_cfg);
+ prev_val = val;
+ if (val & QLOGIC_IB_XGXS_RESET)
+ val &= ~QLOGIC_IB_XGXS_RESET;
+ if (SYM_FIELD(val, XGXSCfg, polarity_inv) != ppd->rx_pol_inv) {
+ /* need to compensate for Tx inversion in partner */
+ val &= ~SYM_MASK(XGXSCfg, polarity_inv);
+ val |= (u64)ppd->rx_pol_inv << SYM_LSB(XGXSCfg, polarity_inv);
+ }
+ if (val != prev_val)
+ qib_write_kreg(dd, kr_xgxs_cfg, val);
+
+ val = qib_read_kreg64(dd, kr_serdes_cfg0);
+
+ /* clear current and de-emphasis bits */
+ config1 &= ~0x0ffffffff00ULL;
+ /* set current to 20ma */
+ config1 |= 0x00000000000ULL;
+ /* set de-emphasis to -5.68dB */
+ config1 |= 0x0cccc000000ULL;
+ qib_write_kreg(dd, kr_serdes_cfg1, config1);
+
+ /* base and port guid same for single port */
+ ppd->guid = dd->base_guid;
+
+ /*
+ * the process of setting and un-resetting the serdes normally
+ * causes a serdes PLL error, so check for that and clear it
+ * here. Also clear the hwerr bit in errstatus, but not others.
+ */
+ hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
+ if (hwstat) {
+ /* should just have PLL, clear all set, in any case */
+ if (hwstat & ~QLOGIC_IB_HWE_SERDESPLLFAILED)
+ qib_write_kreg(dd, kr_hwerrclear, hwstat);
+ qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
+ }
+
+ dd->control |= QLOGIC_IB_C_LINKENABLE;
+ dd->control &= ~QLOGIC_IB_C_FREEZEMODE;
+ qib_write_kreg(dd, kr_control, dd->control);
+
+ return 0;
+}
+
+/**
+ * qib_6120_quiet_serdes - set serdes to txidle
+ * @ppd: physical port of the qlogic_ib device
+ * Called when driver is being unloaded
+ */
+static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 val;
+
+ qib_set_ib_6120_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
+ /* disable IBC */
+ dd->control &= ~QLOGIC_IB_C_LINKENABLE;
+ qib_write_kreg(dd, kr_control,
+ dd->control | QLOGIC_IB_C_FREEZEMODE);
+
+ if (dd->cspec->ibsymdelta || dd->cspec->iblnkerrdelta ||
+ dd->cspec->ibdeltainprog) {
+ u64 diagc;
+
+ /* enable counter writes */
+ diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
+ qib_write_kreg(dd, kr_hwdiagctrl,
+ diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
+
+ if (dd->cspec->ibsymdelta || dd->cspec->ibdeltainprog) {
+ val = read_6120_creg32(dd, cr_ibsymbolerr);
+ if (dd->cspec->ibdeltainprog)
+ val -= val - dd->cspec->ibsymsnap;
+ val -= dd->cspec->ibsymdelta;
+ write_6120_creg(dd, cr_ibsymbolerr, val);
+ }
+ if (dd->cspec->iblnkerrdelta || dd->cspec->ibdeltainprog) {
+ val = read_6120_creg32(dd, cr_iblinkerrrecov);
+ if (dd->cspec->ibdeltainprog)
+ val -= val - dd->cspec->iblnkerrsnap;
+ val -= dd->cspec->iblnkerrdelta;
+ write_6120_creg(dd, cr_iblinkerrrecov, val);
+ }
+
+ /* and disable counter writes */
+ qib_write_kreg(dd, kr_hwdiagctrl, diagc);
+ }
+
+ val = qib_read_kreg64(dd, kr_serdes_cfg0);
+ val |= SYM_MASK(SerdesCfg0, TxIdeEnX);
+ qib_write_kreg(dd, kr_serdes_cfg0, val);
+}
+
+/**
+ * qib_6120_setup_setextled - set the state of the two external LEDs
+ * @ppd: the port of the qlogic_ib device
+ * @on: whether the link is up or not
+ *
+ * The exact combination of LEDs lit when @on is true is determined by
+ * looking at the ibcstatus.
+ *
+ * These LEDs indicate the physical and logical state of the IB link.
+ * For this chip (at least with recommended board pinouts), LED1
+ * is Yellow (logical state) and LED2 is Green (physical state).
+ *
+ * Note: We try to match the Mellanox HCA LED behavior as best
+ * we can. Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate. That's
+ * visible overhead, so not something we will do.
+ *
+ */
+static void qib_6120_setup_setextled(struct qib_pportdata *ppd, u32 on)
+{
+ u64 extctl, val, lst, ltst;
+ unsigned long flags;
+ struct qib_devdata *dd = ppd->dd;
+
+ /*
+ * The diags use the LED to indicate diag info, so we leave
+ * the external LED alone when the diags are running.
+ */
+ if (dd->diag_client)
+ return;
+
+ /* Allow override of LED display for, e.g. Locating system in rack */
+ if (ppd->led_override) {
+ ltst = (ppd->led_override & QIB_LED_PHYS) ?
+ IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED;
+ lst = (ppd->led_override & QIB_LED_LOG) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+ } else if (on) {
+ val = qib_read_kreg64(dd, kr_ibcstatus);
+ ltst = qib_6120_phys_portstate(val);
+ lst = qib_6120_iblink_state(val);
+ } else {
+ ltst = 0;
+ lst = 0;
+ }
+
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
+ SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
+
+ if (ltst == IB_PHYSPORTSTATE_LINKUP)
+ extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
+ if (lst == IB_PORT_ACTIVE)
+ extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
+ dd->cspec->extctrl = extctl;
+ qib_write_kreg(dd, kr_extctrl, extctl);
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+}
+
+static void qib_6120_free_irq(struct qib_devdata *dd)
+{
+ if (dd->cspec->irq) {
+ free_irq(dd->cspec->irq, dd);
+ dd->cspec->irq = 0;
+ }
+ qib_nomsi(dd);
+}
+
+/**
+ * qib_6120_setup_cleanup - clean up any per-chip chip-specific stuff
+ * @dd: the qlogic_ib device
+ *
+ * This is called during driver unload.
+ */
+static void qib_6120_setup_cleanup(struct qib_devdata *dd)
+{
+ qib_6120_free_irq(dd);
+ kfree(dd->cspec->cntrs);
+ kfree(dd->cspec->portcntrs);
+ if (dd->cspec->dummy_hdrq) {
+ dma_free_coherent(&dd->pcidev->dev,
+ ALIGN(dd->rcvhdrcnt *
+ dd->rcvhdrentsize *
+ sizeof(u32), PAGE_SIZE),
+ dd->cspec->dummy_hdrq,
+ dd->cspec->dummy_hdrq_phys);
+ dd->cspec->dummy_hdrq = NULL;
+ }
+}
+
+static void qib_wantpiobuf_6120_intr(struct qib_devdata *dd, u32 needint)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (needint)
+ dd->sendctrl |= SYM_MASK(SendCtrl, PIOIntBufAvail);
+ else
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOIntBufAvail);
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+}
+
+/*
+ * handle errors and unusual events first, separate function
+ * to improve cache hits for fast path interrupt handling
+ */
+static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat)
+{
+ if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
+ qib_dev_err(dd, "interrupt with unknown interrupts %Lx set\n",
+ istat & ~QLOGIC_IB_I_BITSEXTANT);
+
+ if (istat & QLOGIC_IB_I_ERROR) {
+ u64 estat = 0;
+
+ qib_stats.sps_errints++;
+ estat = qib_read_kreg64(dd, kr_errstatus);
+ if (!estat)
+ qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
+ "but no error bits set!\n", istat);
+ handle_6120_errors(dd, estat);
+ }
+
+ if (istat & QLOGIC_IB_I_GPIO) {
+ u32 gpiostatus;
+ u32 to_clear = 0;
+
+ /*
+ * GPIO_3..5 on IBA6120 Rev2 chips indicate
+ * errors that we need to count.
+ */
+ gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
+ /* First the error-counter case. */
+ if (gpiostatus & GPIO_ERRINTR_MASK) {
+ /* want to clear the bits we see asserted. */
+ to_clear |= (gpiostatus & GPIO_ERRINTR_MASK);
+
+ /*
+ * Count appropriately, clear bits out of our copy,
+ * as they have been "handled".
+ */
+ if (gpiostatus & (1 << GPIO_RXUVL_BIT))
+ dd->cspec->rxfc_unsupvl_errs++;
+ if (gpiostatus & (1 << GPIO_OVRUN_BIT))
+ dd->cspec->overrun_thresh_errs++;
+ if (gpiostatus & (1 << GPIO_LLI_BIT))
+ dd->cspec->lli_errs++;
+ gpiostatus &= ~GPIO_ERRINTR_MASK;
+ }
+ if (gpiostatus) {
+ /*
+ * Some unexpected bits remain. If they could have
+ * caused the interrupt, complain and clear.
+ * To avoid repetition of this condition, also clear
+ * the mask. It is almost certainly due to error.
+ */
+ const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
+
+ /*
+ * Also check that the chip reflects our shadow,
+ * and report issues if they caused the interrupt;
+ * we suppress repeats by refreshing from the shadow.
+ */
+ if (mask & gpiostatus) {
+ to_clear |= (gpiostatus & mask);
+ dd->cspec->gpio_mask &= ~(gpiostatus & mask);
+ qib_write_kreg(dd, kr_gpio_mask,
+ dd->cspec->gpio_mask);
+ }
+ }
+ if (to_clear)
+ qib_write_kreg(dd, kr_gpio_clear, (u64) to_clear);
+ }
+}
+
+static irqreturn_t qib_6120intr(int irq, void *data)
+{
+ struct qib_devdata *dd = data;
+ irqreturn_t ret;
+ u32 istat, ctxtrbits, rmask, crcs = 0;
+ unsigned i;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ ret = IRQ_HANDLED;
+ goto bail;
+ }
+
+ istat = qib_read_kreg32(dd, kr_intstatus);
+
+ if (unlikely(!istat)) {
+ ret = IRQ_NONE; /* not our interrupt, or already handled */
+ goto bail;
+ }
+ if (unlikely(istat == -1)) {
+ qib_bad_intrstatus(dd);
+ /* don't know if it was our interrupt or not */
+ ret = IRQ_NONE;
+ goto bail;
+ }
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
+ QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
+ unlikely_6120_intr(dd, istat);
+
+ /*
+ * Clear the interrupt bits we found set, relatively early, so we
+ * know the chip will have seen this by the time we process
+ * the queue, and will re-interrupt if necessary. The processor
+ * itself won't take the interrupt again until we return.
+ */
+ qib_write_kreg(dd, kr_intclear, istat);
+
+ /*
+ * Handle kernel receive queues before checking for pio buffers
+ * available since receives can overflow; piobuf waiters can afford
+ * a few extra cycles, since they were waiting anyway.
+ */
+ ctxtrbits = istat &
+ ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
+ if (ctxtrbits) {
+ rmask = (1U << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (1U << QLOGIC_IB_I_RCVURG_SHIFT);
+ for (i = 0; i < dd->first_user_ctxt; i++) {
+ if (ctxtrbits & rmask) {
+ ctxtrbits &= ~rmask;
+ crcs += qib_kreceive(dd->rcd[i],
+ &dd->cspec->lli_counter,
+ NULL);
+ }
+ rmask <<= 1;
+ }
+ if (crcs) {
+ u32 cntr = dd->cspec->lli_counter;
+ cntr += crcs;
+ if (cntr) {
+ if (cntr > dd->cspec->lli_thresh) {
+ dd->cspec->lli_counter = 0;
+ dd->cspec->lli_errs++;
+ } else
+ dd->cspec->lli_counter += cntr;
+ }
+ }
+
+
+ if (ctxtrbits) {
+ ctxtrbits =
+ (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
+ qib_handle_urcv(dd, ctxtrbits);
+ }
+ }
+
+ if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
+ qib_ib_piobufavail(dd);
+
+ ret = IRQ_HANDLED;
+bail:
+ return ret;
+}
+
+/*
+ * Set up our chip-specific interrupt handler
+ * The interrupt type has already been setup, so
+ * we just need to do the registration and error checking.
+ */
+static void qib_setup_6120_interrupt(struct qib_devdata *dd)
+{
+ /*
+ * If the chip supports added error indication via GPIO pins,
+ * enable interrupts on those bits so the interrupt routine
+ * can count the events. Also set flag so interrupt routine
+ * can know they are expected.
+ */
+ if (SYM_FIELD(dd->revision, Revision_R,
+ ChipRevMinor) > 1) {
+ /* Rev2+ reports extra errors via internal GPIO pins */
+ dd->cspec->gpio_mask |= GPIO_ERRINTR_MASK;
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ }
+
+ if (!dd->cspec->irq)
+ qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
+ "work\n");
+ else {
+ int ret;
+ ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
+ QIB_DRV_NAME, dd);
+ if (ret)
+ qib_dev_err(dd, "Couldn't setup interrupt "
+ "(irq=%d): %d\n", dd->cspec->irq,
+ ret);
+ }
+}
+
+/**
+ * pe_boardname - fill in the board name
+ * @dd: the qlogic_ib device
+ *
+ * info is based on the board revision register
+ */
+static void pe_boardname(struct qib_devdata *dd)
+{
+ char *n;
+ u32 boardid, namelen;
+
+ boardid = SYM_FIELD(dd->revision, Revision,
+ BoardID);
+
+ switch (boardid) {
+ case 2:
+ n = "InfiniPath_QLE7140";
+ break;
+ default:
+ qib_dev_err(dd, "Unknown 6120 board with ID %u\n", boardid);
+ n = "Unknown_InfiniPath_6120";
+ break;
+ }
+ namelen = strlen(n) + 1;
+ dd->boardname = kmalloc(namelen, GFP_KERNEL);
+ if (!dd->boardname)
+ qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
+ else
+ snprintf(dd->boardname, namelen, "%s", n);
+
+ if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
+ qib_dev_err(dd, "Unsupported InfiniPath hardware revision "
+ "%u.%u!\n", dd->majrev, dd->minrev);
+
+ snprintf(dd->boardversion, sizeof(dd->boardversion),
+ "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
+ QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
+ dd->majrev, dd->minrev,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
+
+}
+
+/*
+ * This routine sleeps, so it can only be called from user context, not
+ * from interrupt context. If we need interrupt context, we can split
+ * it into two routines.
+ */
+static int qib_6120_setup_reset(struct qib_devdata *dd)
+{
+ u64 val;
+ int i;
+ int ret;
+ u16 cmdval;
+ u8 int_line, clinesz;
+
+ qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
+
+ /* Use ERROR so it shows up in logs, etc. */
+ qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
+
+ /* no interrupts till re-initted */
+ qib_6120_set_intr_state(dd, 0);
+
+ dd->cspec->ibdeltainprog = 0;
+ dd->cspec->ibsymdelta = 0;
+ dd->cspec->iblnkerrdelta = 0;
+
+ /*
+ * Keep chip from being accessed until we are ready. Use
+ * writeq() directly, to allow the write even though QIB_PRESENT
+ * isn't set.
+ */
+ dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
+ dd->int_counter = 0; /* so we check interrupts work again */
+ val = dd->control | QLOGIC_IB_C_RESET;
+ writeq(val, &dd->kregbase[kr_control]);
+ mb(); /* prevent compiler re-ordering around actual reset */
+
+ for (i = 1; i <= 5; i++) {
+ /*
+ * Allow MBIST, etc. to complete; longer on each retry.
+ * We sometimes get machine checks from bus timeout if no
+ * response, so for now, make it *really* long.
+ */
+ msleep(1000 + (1 + i) * 2000);
+
+ qib_pcie_reenable(dd, cmdval, int_line, clinesz);
+
+ /*
+ * Use readq directly, so we don't need to mark it as PRESENT
+ * until we get a successful indication that all is well.
+ */
+ val = readq(&dd->kregbase[kr_revision]);
+ if (val == dd->revision) {
+ dd->flags |= QIB_PRESENT; /* it's back */
+ ret = qib_reinit_intr(dd);
+ goto bail;
+ }
+ }
+ ret = 0; /* failed */
+
+bail:
+ if (ret) {
+ if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
+ qib_dev_err(dd, "Reset failed to setup PCIe or "
+ "interrupts; continuing anyway\n");
+ /* clear the reset error, init error/hwerror mask */
+ qib_6120_init_hwerrors(dd);
+ /* for Rev2 error interrupts; nop for rev 1 */
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ }
+ return ret;
+}
+
+/**
+ * qib_6120_put_tid - write a TID in chip
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
+ * for expected
+ * @pa: physical address of in memory buffer; tidinvalid if freeing
+ *
+ * This exists as a separate routine to allow for special locking etc.
+ * It's used for both the full cleanup on exit, as well as the normal
+ * setup and teardown.
+ */
+static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
+ u32 type, unsigned long pa)
+{
+ u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
+ unsigned long flags;
+ int tidx;
+ spinlock_t *tidlockp; /* select appropriate spinlock */
+
+ if (!dd->kregbase)
+ return;
+
+ if (pa != dd->tidinvalid) {
+ if (pa & ((1U << 11) - 1)) {
+ qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+ pa);
+ return;
+ }
+ pa >>= 11;
+ if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
+ qib_dev_err(dd, "Physical page address 0x%lx "
+ "larger than supported\n", pa);
+ return;
+ }
+
+ if (type == RCVHQ_RCV_TYPE_EAGER)
+ pa |= dd->tidtemplate;
+ else /* for now, always full 4KB page */
+ pa |= 2 << 29;
+ }
+
+ /*
+ * Avoid chip issue by writing the scratch register
+ * before and after the TID, and with an io write barrier.
+ * We use a spinlock around the writes, so they can't intermix
+ * with other TID (eager or expected) writes (the chip problem
+ * is triggered by back to back TID writes). Unfortunately, this
+ * call can be done from interrupt level for the ctxt 0 eager TIDs,
+ * so we have to use irqsave locks.
+ */
+ /*
+ * Assumes tidptr always > egrtidbase
+ * if type == RCVHQ_RCV_TYPE_EAGER.
+ */
+ tidx = tidptr - dd->egrtidbase;
+
+ tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->rcvhdrcnt)
+ ? &dd->cspec->kernel_tid_lock : &dd->cspec->user_tid_lock;
+ spin_lock_irqsave(tidlockp, flags);
+ qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
+ writel(pa, tidp32);
+ qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
+ mmiowb();
+ spin_unlock_irqrestore(tidlockp, flags);
+}
+
+/**
+ * qib_6120_put_tid_2 - write a TID in chip, Revision 2 or higher
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
+ * for expected
+ * @pa: physical address of in memory buffer; tidinvalid if freeing
+ *
+ * This exists as a separate routine to allow for selection of the
+ * appropriate "flavor". The static calls in cleanup just use the
+ * revision-agnostic form, as they are not performance critical.
+ */
+static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
+ u32 type, unsigned long pa)
+{
+ u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
+ u32 tidx;
+
+ if (!dd->kregbase)
+ return;
+
+ if (pa != dd->tidinvalid) {
+ if (pa & ((1U << 11) - 1)) {
+ qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+ pa);
+ return;
+ }
+ pa >>= 11;
+ if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
+ qib_dev_err(dd, "Physical page address 0x%lx "
+ "larger than supported\n", pa);
+ return;
+ }
+
+ if (type == RCVHQ_RCV_TYPE_EAGER)
+ pa |= dd->tidtemplate;
+ else /* for now, always full 4KB page */
+ pa |= 2 << 29;
+ }
+ tidx = tidptr - dd->egrtidbase;
+ writel(pa, tidp32);
+ mmiowb();
+}
+
+
+/**
+ * qib_6120_clear_tids - clear all TID entries for a context, expected and eager
+ * @dd: the qlogic_ib device
+ * @rcd: the receive context data
+ *
+ * clear all TID entries for a context, expected and eager.
+ * Used from qib_close(). On this chip, TIDs are only 32 bits,
+ * not 64, but they are still on 64 bit boundaries, so tidbase
+ * is declared as u64 * for the pointer math, even though we write 32 bits
+ */
+static void qib_6120_clear_tids(struct qib_devdata *dd,
+ struct qib_ctxtdata *rcd)
+{
+ u64 __iomem *tidbase;
+ unsigned long tidinv;
+ u32 ctxt;
+ int i;
+
+ if (!dd->kregbase || !rcd)
+ return;
+
+ ctxt = rcd->ctxt;
+
+ tidinv = dd->tidinvalid;
+ tidbase = (u64 __iomem *)
+ ((char __iomem *)(dd->kregbase) +
+ dd->rcvtidbase +
+ ctxt * dd->rcvtidcnt * sizeof(*tidbase));
+
+ for (i = 0; i < dd->rcvtidcnt; i++)
+ /* use func pointer because could be one of two funcs */
+ dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+ tidinv);
+
+ tidbase = (u64 __iomem *)
+ ((char __iomem *)(dd->kregbase) +
+ dd->rcvegrbase +
+ rcd->rcvegr_tid_base * sizeof(*tidbase));
+
+ for (i = 0; i < rcd->rcvegrcnt; i++)
+ /* use func pointer because could be one of two funcs */
+ dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
+ tidinv);
+}
+
+/**
+ * qib_6120_tidtemplate - setup constants for TID updates
+ * @dd: the qlogic_ib device
+ *
+ * We set up values that we use a lot, to avoid recalculating them each time
+ */
+static void qib_6120_tidtemplate(struct qib_devdata *dd)
+{
+ u32 egrsize = dd->rcvegrbufsize;
+
+ /*
+ * For now, we always allocate 4KB buffers (at init) so we can
+ * receive max size packets. We may want a module parameter to
+ * specify 2KB or 4KB and/or make it per ctxt instead of per device
+ * for those who want to reduce memory footprint. Note that the
+ * rcvhdrentsize size must be large enough to hold the largest
+ * IB header (currently 96 bytes) that we expect to handle (plus of
+ * course the 2 dwords of RHF).
+ */
+ if (egrsize == 2048)
+ dd->tidtemplate = 1U << 29;
+ else if (egrsize == 4096)
+ dd->tidtemplate = 2U << 29;
+ dd->tidinvalid = 0;
+}
+
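+/*
+ * Weak default: assume write-combining stores are ordered unless an
+ * architecture-specific override of this routine says otherwise.
+ */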
+int __attribute__((weak)) qib_unordered_wc(void)
+{
+ return 0;
+}
+
+/**
+ * qib_6120_get_base_info - set chip-specific flags for user code
+ * @rcd: the qlogic_ib ctxt
+ * @kbase: qib_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */
+static int qib_6120_get_base_info(struct qib_ctxtdata *rcd,
+ struct qib_base_info *kinfo)
+{
+ if (qib_unordered_wc())
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_FORCE_WC_ORDER;
+
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
+ QIB_RUNTIME_FORCE_PIOAVAIL | QIB_RUNTIME_PIO_REGSWAPPED;
+ return 0;
+}
+
+
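+/*
+ * The receive header flags (RHF) occupy the first u64 of each rcvhdrq
+ * entry on this chip, so the message header starts 8 bytes past rhf_addr.
+ */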
+static struct qib_message_header *
+qib_6120_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
+{
+ return (struct qib_message_header *)
+ &rhf_addr[sizeof(u64) / sizeof(u32)];
+}
+
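+/*
+ * Kernel receive contexts come first; user contexts start at
+ * first_user_ctxt, capped at the chip's reported context count.
+ */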
+static void qib_6120_config_ctxts(struct qib_devdata *dd)
+{
+ dd->ctxtcnt = qib_read_kreg32(dd, kr_portcnt);
+ if (qib_n_krcv_queues > 1) {
+ dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
+ if (dd->first_user_ctxt > dd->ctxtcnt)
+ dd->first_user_ctxt = dd->ctxtcnt;
+ dd->qpn_mask = dd->first_user_ctxt <= 2 ? 2 : 6;
+ } else
+ dd->first_user_ctxt = dd->num_pports;
+ dd->n_krcv_queues = dd->first_user_ctxt;
+}
+
+static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+ u32 updegr, u32 egrhd)
+{
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ if (updegr)
+ qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+}
+
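+/*
+ * Return true when the rcvhdrq head has caught up with the tail, using
+ * the DMA'ed tail copy when available, else reading it from the chip.
+ */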
+static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
+{
+ u32 head, tail;
+
+ head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
+ if (rcd->rcvhdrtail_kvaddr)
+ tail = qib_get_rcvhdrtail(rcd);
+ else
+ tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
+ return head == tail;
+}
+
+/*
+ * Used when we close any ctxt, for DMA already in flight
+ * at close. Can't be done until we know hdrq size, so not
+ * early in chip init.
+ */
+static void alloc_dummy_hdrq(struct qib_devdata *dd)
+{
+ dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
+ dd->rcd[0]->rcvhdrq_size,
+ &dd->cspec->dummy_hdrq_phys,
+ GFP_KERNEL | __GFP_COMP);
+ if (!dd->cspec->dummy_hdrq) {
+ qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
+ /* fallback to just 0'ing */
+ dd->cspec->dummy_hdrq_phys = 0UL;
+ }
+}
+
+/*
+ * Modify the RCVCTRL register in a chip-specific way. This
+ * is a function because bit positions and (future) register
+ * location are chip-specific, but the needed operations are
+ * generic. <op> is a bit-mask because we often want to
+ * do multiple modifications.
+ */
+static void rcvctrl_6120_mod(struct qib_pportdata *ppd, unsigned int op,
+ int ctxt)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 mask, val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+
+ if (op & QIB_RCVCTRL_TAILUPD_ENB)
+ dd->rcvctrl |= (1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
+ if (op & QIB_RCVCTRL_TAILUPD_DIS)
+ dd->rcvctrl &= ~(1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
+ if (op & QIB_RCVCTRL_PKEY_ENB)
+ dd->rcvctrl &= ~(1ULL << IBA6120_R_PKEY_DIS_SHIFT);
+ if (op & QIB_RCVCTRL_PKEY_DIS)
+ dd->rcvctrl |= (1ULL << IBA6120_R_PKEY_DIS_SHIFT);
+ if (ctxt < 0)
+ mask = (1ULL << dd->ctxtcnt) - 1;
+ else
+ mask = (1ULL << ctxt);
+ if (op & QIB_RCVCTRL_CTXT_ENB) {
+ /* always done for specific ctxt */
+ dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
+ if (!(dd->flags & QIB_NODMA_RTAIL))
+ dd->rcvctrl |= 1ULL << QLOGIC_IB_R_TAILUPD_SHIFT;
+ /* Write these registers before the context is enabled. */
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
+ dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
+ dd->rcd[ctxt]->rcvhdrq_phys);
+
+ if (ctxt == 0 && !dd->cspec->dummy_hdrq)
+ alloc_dummy_hdrq(dd);
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS)
+ dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
+ if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
+ dd->rcvctrl |= (mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
+ if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
+ dd->rcvctrl &= ~(mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
+ qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+ if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
+ /* arm rcv interrupt */
+ val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
+ dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ }
+ if (op & QIB_RCVCTRL_CTXT_ENB) {
+ /*
+ * Init the context registers also; if we were
+ * disabled, tail and head should both be zero
+ * already from the enable, but since we don't
+ * know, we have to do it explicitly.
+ */
+ val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
+ qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
+
+ val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
+ dd->rcd[ctxt]->head = val;
+ /* If kctxt, interrupt on next receive. */
+ if (ctxt < dd->first_user_ctxt)
+ val |= dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS) {
+ /*
+ * Be paranoid, and never write 0's to these, just use an
+ * unused page. Of course,
+ * rcvhdraddr points to a large chunk of memory, so this
+ * could still trash things, but at least it won't trash
+ * page 0, and by disabling the ctxt, it should stop "soon",
+ * even if a packet or two is already in flight after we
+ * disabled the ctxt. Only 6120 has this issue.
+ */
+ if (ctxt >= 0) {
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
+ dd->cspec->dummy_hdrq_phys);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
+ dd->cspec->dummy_hdrq_phys);
+ } else {
+ unsigned i;
+
+ for (i = 0; i < dd->cfgctxts; i++) {
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
+ i, dd->cspec->dummy_hdrq_phys);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr,
+ i, dd->cspec->dummy_hdrq_phys);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+}
+
+/*
+ * Modify the SENDCTRL register in a chip-specific way. This
+ * is a function because there may be multiple such registers with
+ * slightly different layouts. Only the operations actually used
+ * are implemented so far.
+ * The chip requires no back-to-back sendctrl writes, so write the
+ * scratch register after writing sendctrl.
+ */
+static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 tmp_dd_sendctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+
+ /* First the ones that are "sticky", saved in shadow */
+ if (op & QIB_SENDCTRL_CLEAR)
+ dd->sendctrl = 0;
+ if (op & QIB_SENDCTRL_SEND_DIS)
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOEnable);
+ else if (op & QIB_SENDCTRL_SEND_ENB)
+ dd->sendctrl |= SYM_MASK(SendCtrl, PIOEnable);
+ if (op & QIB_SENDCTRL_AVAIL_DIS)
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
+ else if (op & QIB_SENDCTRL_AVAIL_ENB)
+ dd->sendctrl |= SYM_MASK(SendCtrl, PIOBufAvailUpd);
+
+ if (op & QIB_SENDCTRL_DISARM_ALL) {
+ u32 i, last;
+
+ tmp_dd_sendctrl = dd->sendctrl;
+ /*
+ * disarm any that are not yet launched, disabling sends
+ * and updates until done.
+ */
+ last = dd->piobcnt2k + dd->piobcnt4k;
+ tmp_dd_sendctrl &=
+ ~(SYM_MASK(SendCtrl, PIOEnable) |
+ SYM_MASK(SendCtrl, PIOBufAvailUpd));
+ for (i = 0; i < last; i++) {
+ qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl |
+ SYM_MASK(SendCtrl, Disarm) | i);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ }
+
+ tmp_dd_sendctrl = dd->sendctrl;
+
+ if (op & QIB_SENDCTRL_FLUSH)
+ tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
+ if (op & QIB_SENDCTRL_DISARM)
+ tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
+ ((op & QIB_6120_SendCtrl_DisarmPIOBuf_RMASK) <<
+ SYM_LSB(SendCtrl, DisarmPIOBuf));
+ if (op & QIB_SENDCTRL_AVAIL_BLIP)
+ tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
+
+ qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ if (op & QIB_SENDCTRL_AVAIL_BLIP) {
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+
+ if (op & QIB_SENDCTRL_FLUSH) {
+ u32 v;
+ /*
+ * ensure writes have hit chip, then do a few
+ * more reads, to allow DMA of pioavail registers
+ * to occur, so in-memory copy is in sync with
+ * the chip. Not always safe to sleep.
+ */
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ qib_read_kreg32(dd, kr_scratch);
+ }
+}
+
+/**
+ * qib_portcntr_6120 - read a per-port counter
+ * @ppd: the qlogic_ib port
+ * @reg: the counter to snapshot
+ */
+static u64 qib_portcntr_6120(struct qib_pportdata *ppd, u32 reg)
+{
+ u64 ret = 0ULL;
+ struct qib_devdata *dd = ppd->dd;
+ u16 creg;
+ /* 0xffff for unimplemented or synthesized counters */
+ static const u16 xlator[] = {
+ [QIBPORTCNTR_PKTSEND] = cr_pktsend,
+ [QIBPORTCNTR_WORDSEND] = cr_wordsend,
+ [QIBPORTCNTR_PSXMITDATA] = 0xffff,
+ [QIBPORTCNTR_PSXMITPKTS] = 0xffff,
+ [QIBPORTCNTR_PSXMITWAIT] = 0xffff,
+ [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
+ [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
+ [QIBPORTCNTR_PSRCVDATA] = 0xffff,
+ [QIBPORTCNTR_PSRCVPKTS] = 0xffff,
+ [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
+ [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
+ [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
+ [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
+ [QIBPORTCNTR_RXLOCALPHYERR] = 0xffff,
+ [QIBPORTCNTR_RXVLERR] = 0xffff,
+ [QIBPORTCNTR_ERRICRC] = cr_erricrc,
+ [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
+ [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
+ [QIBPORTCNTR_BADFORMAT] = cr_badformat,
+ [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
+ [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
+ [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
+ [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
+ [QIBPORTCNTR_EXCESSBUFOVFL] = 0xffff,
+ [QIBPORTCNTR_ERRLINK] = cr_errlink,
+ [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
+ [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
+ [QIBPORTCNTR_LLI] = 0xffff,
+ [QIBPORTCNTR_PSINTERVAL] = 0xffff,
+ [QIBPORTCNTR_PSSTART] = 0xffff,
+ [QIBPORTCNTR_PSSTAT] = 0xffff,
+ [QIBPORTCNTR_VL15PKTDROP] = 0xffff,
+ [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
+ [QIBPORTCNTR_KHDROVFL] = 0xffff,
+ };
+
+ if (reg >= ARRAY_SIZE(xlator)) {
+ qib_devinfo(ppd->dd->pcidev,
+ "Unimplemented portcounter %u\n", reg);
+ goto done;
+ }
+ creg = xlator[reg];
+
+ /* handle counter requests not implemented as chip counters */
+ if (reg == QIBPORTCNTR_LLI)
+ ret = dd->cspec->lli_errs;
+ else if (reg == QIBPORTCNTR_EXCESSBUFOVFL)
+ ret = dd->cspec->overrun_thresh_errs;
+ else if (reg == QIBPORTCNTR_KHDROVFL) {
+ int i;
+
+ /* sum over all kernel contexts */
+ for (i = 0; i < dd->first_user_ctxt; i++)
+ ret += read_6120_creg32(dd, cr_portovfl + i);
+ } else if (reg == QIBPORTCNTR_PSSTAT)
+ ret = dd->cspec->pma_sample_status;
+ if (creg == 0xffff)
+ goto done;
+
+ /*
+ * only the fast-incrementing counters are 64 bit; use 32 bit reads
+ * for the rest to avoid two independent reads on opteron
+ */
+ if (creg == cr_wordsend || creg == cr_wordrcv ||
+ creg == cr_pktsend || creg == cr_pktrcv)
+ ret = read_6120_creg(dd, creg);
+ else
+ ret = read_6120_creg32(dd, creg);
+ if (creg == cr_ibsymbolerr) {
+ if (dd->cspec->ibdeltainprog)
+ ret -= ret - dd->cspec->ibsymsnap;
+ ret -= dd->cspec->ibsymdelta;
+ } else if (creg == cr_iblinkerrrecov) {
+ if (dd->cspec->ibdeltainprog)
+ ret -= ret - dd->cspec->iblnkerrsnap;
+ ret -= dd->cspec->iblnkerrdelta;
+ }
+ if (reg == QIBPORTCNTR_RXDROPPKT) /* add special cased count */
+ ret += dd->cspec->rxfc_unsupvl_errs;
+
+done:
+ return ret;
+}
+
+/*
+ * Device counter names (not port-specific), one line per stat,
+ * single string. Used by utilities like ipathstats to print the stats
+ * in a way which works for different versions of drivers, without changing
+ * the utility. Names need to be 12 chars or less (w/o newline), for proper
+ * display by utility.
+ * Non-error counters are first.
+ * Start of "error" conters is indicated by a leading "E " on the first
+ * "error" counter, and doesn't count in label length.
+ * The EgrOvfl list needs to be last so we truncate them at the configured
+ * context count for the device.
+ * cntr6120indices contains the corresponding register indices.
+ */
+static const char cntr6120names[] =
+ "Interrupts\n"
+ "HostBusStall\n"
+ "E RxTIDFull\n"
+ "RxTIDInvalid\n"
+ "Ctxt0EgrOvfl\n"
+ "Ctxt1EgrOvfl\n"
+ "Ctxt2EgrOvfl\n"
+ "Ctxt3EgrOvfl\n"
+ "Ctxt4EgrOvfl\n";
+
+static const size_t cntr6120indices[] = {
+ cr_lbint,
+ cr_lbflowstall,
+ cr_errtidfull,
+ cr_errtidvalid,
+ cr_portovfl + 0,
+ cr_portovfl + 1,
+ cr_portovfl + 2,
+ cr_portovfl + 3,
+ cr_portovfl + 4,
+};
+
+/*
+ * same as cntr6120names and cntr6120indices, but for port-specific counters.
+ * portcntr6120indices is somewhat complicated by some registers needing
+ * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
+ */
+static const char portcntr6120names[] =
+ "TxPkt\n"
+ "TxFlowPkt\n"
+ "TxWords\n"
+ "RxPkt\n"
+ "RxFlowPkt\n"
+ "RxWords\n"
+ "TxFlowStall\n"
+ "E IBStatusChng\n"
+ "IBLinkDown\n"
+ "IBLnkRecov\n"
+ "IBRxLinkErr\n"
+ "IBSymbolErr\n"
+ "RxLLIErr\n"
+ "RxBadFormat\n"
+ "RxBadLen\n"
+ "RxBufOvrfl\n"
+ "RxEBP\n"
+ "RxFlowCtlErr\n"
+ "RxICRCerr\n"
+ "RxLPCRCerr\n"
+ "RxVCRCerr\n"
+ "RxInvalLen\n"
+ "RxInvalPKey\n"
+ "RxPktDropped\n"
+ "TxBadLength\n"
+ "TxDropped\n"
+ "TxInvalLen\n"
+ "TxUnderrun\n"
+ "TxUnsupVL\n"
+ ;
+
+#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
+static const size_t portcntr6120indices[] = {
+ QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
+ cr_pktsendflow,
+ QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
+ cr_pktrcvflowctrl,
+ QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
+ cr_ibstatuschange,
+ QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
+ cr_rcvflowctrl_err,
+ QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
+ cr_invalidslen,
+ cr_senddropped,
+ cr_errslen,
+ cr_sendunderrun,
+ cr_txunsupvl,
+};
+
+/* do all the setup to make the counter reads efficient later */
+static void init_6120_cntrnames(struct qib_devdata *dd)
+{
+ int i, j = 0;
+ char *s;
+
+ for (i = 0, s = (char *)cntr6120names; s && j <= dd->cfgctxts;
+ i++) {
+ /* we always have at least one counter before the egrovfl */
+ if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
+ j = 1;
+ s = strchr(s + 1, '\n');
+ if (s && j)
+ j++;
+ }
+ dd->cspec->ncntrs = i;
+ if (!s)
+ /* full list; size is without terminating null */
+ dd->cspec->cntrnamelen = sizeof(cntr6120names) - 1;
+ else
+ dd->cspec->cntrnamelen = 1 + s - cntr6120names;
+ dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->cspec->cntrs)
+ qib_dev_err(dd, "Failed allocation for counters\n");
+
+ for (i = 0, s = (char *)portcntr6120names; s; i++)
+ s = strchr(s + 1, '\n');
+ dd->cspec->nportcntrs = i - 1;
+ dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
+ dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->cspec->portcntrs)
+ qib_dev_err(dd, "Failed allocation for portcounters\n");
+}
+
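+/*
+ * Counter reads are two-phase: when namep is non-NULL the caller is
+ * asking for the name string (length returned); otherwise the current
+ * values are snapshotted into the preallocated cntrs buffer.
+ */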
+static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
+ u64 **cntrp)
+{
+ u32 ret;
+
+ if (namep) {
+ ret = dd->cspec->cntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ else
+ *namep = (char *)cntr6120names;
+ } else {
+ u64 *cntr = dd->cspec->cntrs;
+ int i;
+
+ ret = dd->cspec->ncntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+ if (pos >= ret) {
+ ret = 0; /* final read after getting everything */
+ goto done;
+ }
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->ncntrs; i++)
+ *cntr++ = read_6120_creg32(dd, cntr6120indices[i]);
+ }
+done:
+ return ret;
+}
+
+static u32 qib_read_6120portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
+ char **namep, u64 **cntrp)
+{
+ u32 ret;
+
+ if (namep) {
+ ret = dd->cspec->portcntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ else
+ *namep = (char *)portcntr6120names;
+ } else {
+ u64 *cntr = dd->cspec->portcntrs;
+ struct qib_pportdata *ppd = &dd->pport[port];
+ int i;
+
+ ret = dd->cspec->nportcntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->nportcntrs; i++) {
+ if (portcntr6120indices[i] & _PORT_VIRT_FLAG)
+ *cntr++ = qib_portcntr_6120(ppd,
+ portcntr6120indices[i] &
+ ~_PORT_VIRT_FLAG);
+ else
+ *cntr++ = read_6120_creg32(dd,
+ portcntr6120indices[i]);
+ }
+ }
+done:
+ return ret;
+}
+
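+/*
+ * Sanity check, run from the faststats timer: if the chip's error mask
+ * no longer matches the shadow copy, restore it, and if hardware errors
+ * or freeze mode are pending, clear error/interrupt state as well.
+ */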
+static void qib_chk_6120_errormask(struct qib_devdata *dd)
+{
+ static u32 fixed;
+ u32 ctrl;
+ unsigned long errormask;
+ unsigned long hwerrs;
+
+ if (!dd->cspec->errormask || !(dd->flags & QIB_INITTED))
+ return;
+
+ errormask = qib_read_kreg64(dd, kr_errmask);
+
+ if (errormask == dd->cspec->errormask)
+ return;
+ fixed++;
+
+ hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+ ctrl = qib_read_kreg32(dd, kr_control);
+
+ qib_write_kreg(dd, kr_errmask,
+ dd->cspec->errormask);
+
+ if ((hwerrs & dd->cspec->hwerrmask) ||
+ (ctrl & QLOGIC_IB_C_FREEZEMODE)) {
+ qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+ qib_write_kreg(dd, kr_errclear, 0ULL);
+ /* force re-interrupt of pending events, just in case */
+ qib_write_kreg(dd, kr_intclear, 0ULL);
+ qib_devinfo(dd->pcidev,
+ "errormask fixed(%u) %lx->%lx, ctrl %x hwerr %lx\n",
+ fixed, errormask, (unsigned long)dd->cspec->errormask,
+ ctrl, hwerrs);
+ }
+}
+
+/**
+ * qib_get_6120_faststats - get word counters from chip before they overflow
+ * @opaque: contains a pointer to the qlogic_ib device qib_devdata
+ *
+ * This needs more work; in particular, a decision on whether we really
+ * need traffic_wds done the way it is.
+ * Called from add_timer.
+ */
+static void qib_get_6120_faststats(unsigned long opaque)
+{
+ struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_pportdata *ppd = dd->pport;
+ unsigned long flags;
+ u64 traffic_wds;
+
+ /*
+ * don't access the chip while running diags, or memory diags can
+ * fail
+ */
+ if (!(dd->flags & QIB_INITTED) || dd->diag_client)
+ /* but re-arm the timer, for the diags case; won't hurt otherwise */
+ goto done;
+
+ /*
+ * We now try to maintain an activity timer, based on traffic
+ * exceeding a threshold, so we need to check the word-counts
+ * even if they are 64-bit.
+ */
+ traffic_wds = qib_portcntr_6120(ppd, cr_wordsend) +
+ qib_portcntr_6120(ppd, cr_wordrcv);
+ spin_lock_irqsave(&dd->eep_st_lock, flags);
+ traffic_wds -= dd->traffic_wds;
+ dd->traffic_wds += traffic_wds;
+ if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+ atomic_add(5, &dd->active_time); /* S/B #define */
+ spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+
+ qib_chk_6120_errormask(dd);
+done:
+ mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+}
+
+/* no interrupt fallback for these chips */
+static int qib_6120_nointr_fallback(struct qib_devdata *dd)
+{
+ return 0;
+}
+
+/*
+ * reset the XGXS (between serdes and IBC). Slightly less intrusive
+ * than resetting the IBC or external link state, and useful in some
+ * cases to cause some retraining. To do this right, we reset IBC
+ * as well.
+ */
+static void qib_6120_xgxs_reset(struct qib_pportdata *ppd)
+{
+ u64 val, prev_val;
+ struct qib_devdata *dd = ppd->dd;
+
+ prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
+ val = prev_val | QLOGIC_IB_XGXS_RESET;
+ prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
+ qib_write_kreg(dd, kr_control,
+ dd->control & ~QLOGIC_IB_C_LINKENABLE);
+ qib_write_kreg(dd, kr_xgxs_cfg, val);
+ qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
+ qib_write_kreg(dd, kr_control, dd->control);
+}
+
+static int qib_6120_get_ib_cfg(struct qib_pportdata *ppd, int which)
+{
+ int ret;
+
+ switch (which) {
+ case QIB_IB_CFG_LWID:
+ ret = ppd->link_width_active;
+ break;
+
+ case QIB_IB_CFG_SPD:
+ ret = ppd->link_speed_active;
+ break;
+
+ case QIB_IB_CFG_LWID_ENB:
+ ret = ppd->link_width_enabled;
+ break;
+
+ case QIB_IB_CFG_SPD_ENB:
+ ret = ppd->link_speed_enabled;
+ break;
+
+ case QIB_IB_CFG_OP_VLS:
+ ret = ppd->vls_operational;
+ break;
+
+ case QIB_IB_CFG_VL_HIGH_CAP:
+ ret = 0;
+ break;
+
+ case QIB_IB_CFG_VL_LOW_CAP:
+ ret = 0;
+ break;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
+ OverrunThreshold);
+ break;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
+ PhyerrThreshold);
+ break;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ ret = (ppd->dd->cspec->ibcctrl &
+ SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
+ IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
+ break;
+
+ case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
+ ret = 0; /* no heartbeat on this chip */
+ break;
+
+ case QIB_IB_CFG_PMA_TICKS:
+ ret = 250; /* 1 usec. */
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * We assume range checking is already done, if needed.
+ */
+static int qib_6120_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret = 0;
+ u64 val64;
+ u16 lcmd, licmd;
+
+ switch (which) {
+ case QIB_IB_CFG_LWID_ENB:
+ ppd->link_width_enabled = val;
+ break;
+
+ case QIB_IB_CFG_SPD_ENB:
+ ppd->link_speed_enabled = val;
+ break;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
+ OverrunThreshold);
+ if (val64 != val) {
+ dd->cspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, OverrunThreshold);
+ dd->cspec->ibcctrl |= (u64) val <<
+ SYM_LSB(IBCCtrl, OverrunThreshold);
+ qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ break;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
+ PhyerrThreshold);
+ if (val64 != val) {
+ dd->cspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, PhyerrThreshold);
+ dd->cspec->ibcctrl |= (u64) val <<
+ SYM_LSB(IBCCtrl, PhyerrThreshold);
+ qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ break;
+
+ case QIB_IB_CFG_PKEYS: /* update pkeys */
+ val64 = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
+ ((u64) ppd->pkeys[2] << 32) |
+ ((u64) ppd->pkeys[3] << 48);
+ qib_write_kreg(dd, kr_partitionkey, val64);
+ break;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ if (val == IB_LINKINITCMD_POLL)
+ dd->cspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
+ else /* SLEEP */
+ dd->cspec->ibcctrl |=
+ SYM_MASK(IBCCtrl, LinkDownDefaultState);
+ qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ break;
+
+ case QIB_IB_CFG_MTU: /* update the MTU in IBC */
+ /*
+ * Update our housekeeping variables, and set IBC max
+ * size, same as init code; max IBC is max we allow in
+ * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
+ * Set even if it's unchanged, print debug message only
+ * on changes.
+ */
+ val = (ppd->ibmaxlen >> 2) + 1;
+ dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
+ dd->cspec->ibcctrl |= (u64)val <<
+ SYM_LSB(IBCCtrl, MaxPktLen);
+ qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ break;
+
+ case QIB_IB_CFG_LSTATE: /* set the IB link state */
+ switch (val & 0xffff0000) {
+ case IB_LINKCMD_DOWN:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
+ if (!dd->cspec->ibdeltainprog) {
+ dd->cspec->ibdeltainprog = 1;
+ dd->cspec->ibsymsnap =
+ read_6120_creg32(dd, cr_ibsymbolerr);
+ dd->cspec->iblnkerrsnap =
+ read_6120_creg32(dd, cr_iblinkerrrecov);
+ }
+ break;
+
+ case IB_LINKCMD_ARMED:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
+ break;
+
+ case IB_LINKCMD_ACTIVE:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
+ goto bail;
+ }
+ switch (val & 0xffff) {
+ case IB_LINKINITCMD_NOP:
+ licmd = 0;
+ break;
+
+ case IB_LINKINITCMD_POLL:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
+ break;
+
+ case IB_LINKINITCMD_SLEEP:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
+ break;
+
+ case IB_LINKINITCMD_DISABLE:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
+ val & 0xffff);
+ goto bail;
+ }
+ qib_set_ib_6120_lstate(ppd, lcmd, licmd);
+ goto bail;
+
+ case QIB_IB_CFG_HRTBT:
+ ret = -EINVAL;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+bail:
+ return ret;
+}
+
+static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
+{
+ int ret = 0;
+ if (!strncmp(what, "ibc", 3)) {
+ ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
+ qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
+ ppd->dd->unit, ppd->port);
+ } else if (!strncmp(what, "off", 3)) {
+ ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
+ qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
+ "(normal)\n", ppd->dd->unit, ppd->port);
+ } else
+ ret = -EINVAL;
+ if (!ret) {
+ qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->dd->cspec->ibcctrl);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+ }
+ return ret;
+}
+
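+/*
+ * PMA sample timer: on the first expiry (STARTED) snapshot the baseline
+ * counters and re-arm for the sample interval; on the next expiry
+ * (RUNNING) compute the deltas and mark the sample DONE.
+ */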
+static void pma_6120_timer(unsigned long data)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)data;
+ struct qib_chip_specific *cs = ppd->dd->cspec;
+ struct qib_ibport *ibp = &ppd->ibport_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ibp->lock, flags);
+ if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) {
+ cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
+ qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
+ &cs->spkts, &cs->rpkts, &cs->xmit_wait);
+ mod_timer(&cs->pma_timer,
+ jiffies + usecs_to_jiffies(ibp->pma_sample_interval));
+ } else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
+ u64 ta, tb, tc, td, te;
+
+ cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
+ qib_snapshot_counters(ppd, &ta, &tb, &tc, &td, &te);
+
+ cs->sword = ta - cs->sword;
+ cs->rword = tb - cs->rword;
+ cs->spkts = tc - cs->spkts;
+ cs->rpkts = td - cs->rpkts;
+ cs->xmit_wait = te - cs->xmit_wait;
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+}
+
+/*
+ * Note that the caller has the ibp->lock held.
+ */
+static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv,
+ u32 start)
+{
+ struct qib_chip_specific *cs = ppd->dd->cspec;
+
+ if (start && intv) {
+ cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
+ mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(start));
+ } else if (intv) {
+ cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
+ qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
+ &cs->spkts, &cs->rpkts, &cs->xmit_wait);
+ mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(intv));
+ } else {
+ cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
+ cs->sword = 0;
+ cs->rword = 0;
+ cs->spkts = 0;
+ cs->rpkts = 0;
+ cs->xmit_wait = 0;
+ }
+}
+
+static u32 qib_6120_iblink_state(u64 ibcs)
+{
+ u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
+
+ switch (state) {
+ case IB_6120_L_STATE_INIT:
+ state = IB_PORT_INIT;
+ break;
+ case IB_6120_L_STATE_ARM:
+ state = IB_PORT_ARMED;
+ break;
+ case IB_6120_L_STATE_ACTIVE:
+ /* fall through */
+ case IB_6120_L_STATE_ACT_DEFER:
+ state = IB_PORT_ACTIVE;
+ break;
+ default: /* fall through */
+ case IB_6120_L_STATE_DOWN:
+ state = IB_PORT_DOWN;
+ break;
+ }
+ return state;
+}
+
+/* returns the IBTA port state, rather than the IBC link training state */
+static u8 qib_6120_phys_portstate(u64 ibcs)
+{
+ u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
+ return qib_6120_physportstate[state];
+}
+
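+/*
+ * IB link up/down notification: on link up, fold any in-progress
+ * symbol/link-error snapshots into the running deltas; on link down,
+ * start a new snapshot. Also updates HoL state and the external LEDs.
+ */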
+static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+ if (ibup) {
+ if (ppd->dd->cspec->ibdeltainprog) {
+ ppd->dd->cspec->ibdeltainprog = 0;
+ ppd->dd->cspec->ibsymdelta +=
+ read_6120_creg32(ppd->dd, cr_ibsymbolerr) -
+ ppd->dd->cspec->ibsymsnap;
+ ppd->dd->cspec->iblnkerrdelta +=
+ read_6120_creg32(ppd->dd, cr_iblinkerrrecov) -
+ ppd->dd->cspec->iblnkerrsnap;
+ }
+ qib_hol_init(ppd);
+ } else {
+ ppd->dd->cspec->lli_counter = 0;
+ if (!ppd->dd->cspec->ibdeltainprog) {
+ ppd->dd->cspec->ibdeltainprog = 1;
+ ppd->dd->cspec->ibsymsnap =
+ read_6120_creg32(ppd->dd, cr_ibsymbolerr);
+ ppd->dd->cspec->iblnkerrsnap =
+ read_6120_creg32(ppd->dd, cr_iblinkerrrecov);
+ }
+ qib_hol_down(ppd);
+ }
+
+ qib_6120_setup_setextled(ppd, ibup);
+
+ return 0;
+}
+
+/* Does read/modify/write to appropriate registers to
+ * set output and direction bits selected by mask.
+ * These are in their canonical positions (e.g. lsb of
+ * dir will end up in D48 of extctrl on existing chips).
+ * Returns contents of GP Inputs.
+ */
+static int gpio_6120_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
+{
+ u64 read_val, new_out;
+ unsigned long flags;
+
+ if (mask) {
+ /* some bits being written, lock access to GPIO */
+ dir &= mask;
+ out &= mask;
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
+ dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
+ new_out = (dd->cspec->gpio_out & ~mask) | out;
+
+ qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+ qib_write_kreg(dd, kr_gpio_out, new_out);
+ dd->cspec->gpio_out = new_out;
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+ }
+ /*
+ * It is unlikely that a read at this time would get valid
+ * data on a pin whose direction line was set in the same
+ * call to this function. We include the read here because
+ * that allows us to potentially combine a change on one pin with
+ * a read on another, and because the old code did something like
+ * this.
+ */
+ read_val = qib_read_kreg64(dd, kr_extstatus);
+ return SYM_FIELD(read_val, EXTStatus, GPIOIn);
+}
+
+/*
+ * Read fundamental info we need to use the chip. These are
+ * the registers that describe chip capabilities, and are
+ * saved in shadow registers.
+ */
+static void get_6120_chip_params(struct qib_devdata *dd)
+{
+ u64 val;
+ u32 piobufs;
+ int mtu;
+
+ dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
+
+ dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
+ dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
+ dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
+ dd->palign = qib_read_kreg32(dd, kr_palign);
+ dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
+ dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
+
+ dd->rcvhdrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
+
+ val = qib_read_kreg64(dd, kr_sendpiosize);
+ dd->piosize2k = val & ~0U;
+ dd->piosize4k = val >> 32;
+
+ mtu = ib_mtu_enum_to_int(qib_ibmtu);
+ if (mtu == -1)
+ mtu = QIB_DEFAULT_MTU;
+ dd->pport->ibmtu = (u32)mtu;
+
+ val = qib_read_kreg64(dd, kr_sendpiobufcnt);
+ dd->piobcnt2k = val & ~0U;
+ dd->piobcnt4k = val >> 32;
+ /* these may be adjusted in init_chip_wc_pat() */
+ dd->pio2kbase = (u32 __iomem *)
+ (((char __iomem *)dd->kregbase) + dd->pio2k_bufbase);
+ if (dd->piobcnt4k) {
+ dd->pio4kbase = (u32 __iomem *)
+ (((char __iomem *) dd->kregbase) +
+ (dd->piobufbase >> 32));
+ /*
+ * 4K buffers take 2 pages; we use roundup just to be
+ * paranoid; we calculate it once here, rather than on
+ * every buf allocate
+ */
+ dd->align4k = ALIGN(dd->piosize4k, dd->palign);
+ }
+
+ piobufs = dd->piobcnt4k + dd->piobcnt2k;
+
+ dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
+ (sizeof(u64) * BITS_PER_BYTE / 2);
+}
+
+/*
+ * The chip base addresses in cspec and cpspec have to be set
+ * after possible init_chip_wc_pat(), rather than in
+ * get_6120_chip_params(), so this is split out as a separate function.
+ */
+static void set_6120_baseaddrs(struct qib_devdata *dd)
+{
+ u32 cregbase;
+ cregbase = qib_read_kreg32(dd, kr_counterregbase);
+ dd->cspec->cregbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase + cregbase);
+
+ dd->egrtidbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase + dd->rcvegrbase);
+}
+
+/*
+ * Write the final few registers that depend on some of the
+ * init setup. Done late in init, just before bringing up
+ * the serdes.
+ */
+static int qib_late_6120_initreg(struct qib_devdata *dd)
+{
+ int ret = 0;
+ u64 val;
+
+ qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
+ qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
+ qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
+ qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
+ val = qib_read_kreg64(dd, kr_sendpioavailaddr);
+ if (val != dd->pioavailregs_phys) {
+ qib_dev_err(dd, "Catastrophic software error, "
+ "SendPIOAvailAddr written as %lx, "
+ "read back as %llx\n",
+ (unsigned long) dd->pioavailregs_phys,
+ (unsigned long long) val);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int init_6120_variables(struct qib_devdata *dd)
+{
+ int ret = 0;
+ struct qib_pportdata *ppd;
+ u32 sbufs;
+
+ ppd = (struct qib_pportdata *)(dd + 1);
+ dd->pport = ppd;
+ dd->num_pports = 1;
+
+ dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports);
+ ppd->cpspec = NULL; /* not used in this chip */
+
+ spin_lock_init(&dd->cspec->kernel_tid_lock);
+ spin_lock_init(&dd->cspec->user_tid_lock);
+ spin_lock_init(&dd->cspec->rcvmod_lock);
+ spin_lock_init(&dd->cspec->gpio_lock);
+
+ /* we haven't yet set QIB_PRESENT, so use read directly */
+ dd->revision = readq(&dd->kregbase[kr_revision]);
+
+ if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
+ qib_dev_err(dd, "Revision register read failure, "
+ "giving up initialization\n");
+ ret = -ENODEV;
+ goto bail;
+ }
+ dd->flags |= QIB_PRESENT; /* now register routines work */
+
+ dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
+ ChipRevMajor);
+ dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
+ ChipRevMinor);
+
+ get_6120_chip_params(dd);
+ pe_boardname(dd); /* fill in boardname */
+
+ /*
+ * GPIO bits for TWSI data and clock,
+ * used for serial EEPROM.
+ */
+ dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
+ dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
+ dd->twsi_eeprom_dev = QIB_TWSI_NO_DEV;
+
+ if (qib_unordered_wc())
+ dd->flags |= QIB_PIO_FLUSH_WC;
+
+ /*
+ * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
+ * 2 is Some Misc, 3 is reserved for future.
+ */
+ dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
+
+ /* Ignore errors in PIO/PBC on systems with unordered write-combining */
+ if (qib_unordered_wc())
+ dd->eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;
+
+ dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
+
+ dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
+
+ qib_init_pportdata(ppd, dd, 0, 1);
+ ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
+ ppd->link_speed_supported = QIB_IB_SDR;
+ ppd->link_width_enabled = IB_WIDTH_4X;
+ ppd->link_speed_enabled = ppd->link_speed_supported;
+ /* these can't change for this chip, so set once */
+ ppd->link_width_active = ppd->link_width_enabled;
+ ppd->link_speed_active = ppd->link_speed_enabled;
+ ppd->vls_supported = IB_VL_VL0;
+ ppd->vls_operational = ppd->vls_supported;
+
+ dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
+ dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+ dd->rhf_offset = 0;
+
+ /* we always allocate at least 2048 bytes for eager buffers */
+ ret = ib_mtu_enum_to_int(qib_ibmtu);
+ dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
+
+ qib_6120_tidtemplate(dd);
+
+ /*
+ * We can request a receive interrupt for 1 or
+ * more packets from current offset. For now, we set this
+ * up for a single packet.
+ */
+ dd->rhdrhead_intr_off = 1ULL << 32;
+
+ /* setup the stats timer; the add_timer is done at end of init */
+ init_timer(&dd->stats_timer);
+ dd->stats_timer.function = qib_get_6120_faststats;
+ dd->stats_timer.data = (unsigned long) dd;
+
+ init_timer(&dd->cspec->pma_timer);
+ dd->cspec->pma_timer.function = pma_6120_timer;
+ dd->cspec->pma_timer.data = (unsigned long) ppd;
+
+ dd->ureg_align = qib_read_kreg32(dd, kr_palign);
+
+ dd->piosize2kmax_dwords = dd->piosize2k >> 2;
+ qib_6120_config_ctxts(dd);
+ qib_set_ctxtcnt(dd);
+
+ if (qib_wc_pat) {
+ ret = init_chip_wc_pat(dd, 0);
+ if (ret)
+ goto bail;
+ }
+ set_6120_baseaddrs(dd); /* set chip access pointers now */
+
+ ret = 0;
+ if (qib_mini_init)
+ goto bail;
+
+ qib_num_cfg_vls = 1; /* if any 6120's, only one VL */
+
+ ret = qib_create_ctxts(dd);
+ init_6120_cntrnames(dd);
+
+ /* use all of the 4KB buffers for the kernel; otherwise 16 */
+ sbufs = dd->piobcnt4k ? dd->piobcnt4k : 16;
+
+ dd->lastctxt_piobuf = dd->piobcnt2k + dd->piobcnt4k - sbufs;
+ dd->pbufsctxt = dd->lastctxt_piobuf /
+ (dd->cfgctxts - dd->first_user_ctxt);
+
+ if (ret)
+ goto bail;
+bail:
+ return ret;
+}
+
+/*
+ * For this chip, we want to use the same buffer every time
+ * when we are trying to bring the link up (they are always VL15
+ * packets). At that link state the packet should always go out immediately
+ * (or at least be discarded at the tx interface if the link is down).
+ * If it doesn't, and the buffer isn't available, that means some other
+ * sender has gotten ahead of us, and is preventing our packet from going
+ * out. In that case, we flush all packets, and try again. If that still
+ * fails, we fail the request, and hope things work the next time around.
+ *
+ * We don't need very complicated heuristics on whether the packet had
+ * time to go out or not, since even at SDR 1X, it goes out in very short
+ * time periods, covered by the chip reads done here and as part of the
+ * flush.
+ */
+static u32 __iomem *get_6120_link_buf(struct qib_pportdata *ppd, u32 *bnum)
+{
+ u32 __iomem *buf;
+ u32 lbuf = ppd->dd->piobcnt2k + ppd->dd->piobcnt4k - 1;
+
+ /*
+ * always blip to get avail list updated, since it's almost
+ * always needed, and is fairly cheap.
+ */
+ sendctrl_6120_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+ buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+ if (buf)
+ goto done;
+
+ sendctrl_6120_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
+ QIB_SENDCTRL_AVAIL_BLIP);
+ ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
+ qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+ buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+done:
+ return buf;
+}
+
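+/*
+ * Pick a send buffer: VL15 packets sent while the link is still coming
+ * up use the dedicated link buffer; large packets go to the 4K pool;
+ * others try the 2K pool first and fall back to 4K if all 2K are busy.
+ */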
+static u32 __iomem *qib_6120_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
+ u32 *pbufnum)
+{
+ u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
+ struct qib_devdata *dd = ppd->dd;
+ u32 __iomem *buf;
+
+ if (((pbc >> 32) & PBC_6120_VL15_SEND_CTRL) &&
+ !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
+ buf = get_6120_link_buf(ppd, pbufnum);
+ else {
+
+ if ((plen + 1) > dd->piosize2kmax_dwords)
+ first = dd->piobcnt2k;
+ else
+ first = 0;
+ /* try 4k if all 2k busy, so same last for both sizes */
+ last = dd->piobcnt2k + dd->piobcnt4k - 1;
+ buf = qib_getsendbuf_range(dd, pbufnum, first, last);
+ }
+ return buf;
+}
+
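+/*
+ * The 6120 has no send DMA engine, so the SDMA hooks below are stubs:
+ * setup reports -ENODEV and the rest are no-ops.
+ */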
+static int init_sdma_6120_regs(struct qib_pportdata *ppd)
+{
+ return -ENODEV;
+}
+
+static u16 qib_sdma_6120_gethead(struct qib_pportdata *ppd)
+{
+ return 0;
+}
+
+static int qib_sdma_6120_busy(struct qib_pportdata *ppd)
+{
+ return 0;
+}
+
+static void qib_sdma_update_6120_tail(struct qib_pportdata *ppd, u16 tail)
+{
+}
+
+static void qib_6120_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
+{
+}
+
+static void qib_sdma_set_6120_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
+{
+}
+
+/*
+ * the pbc doesn't need a VL15 indicator, but we need it for link_buf.
+ * The chip ignores the bit if set.
+ */
+static u32 qib_6120_setpbc_control(struct qib_pportdata *ppd, u32 plen,
+ u8 srate, u8 vl)
+{
+ return vl == 15 ? PBC_6120_VL15_SEND_CTRL : 0;
+}
+
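+/* VL15 buffers need no special initialization on this chip */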
+static void qib_6120_initvl15_bufs(struct qib_devdata *dd)
+{
+}
+
+static void qib_6120_init_ctxt(struct qib_ctxtdata *rcd)
+{
+ rcd->rcvegrcnt = rcd->dd->rcvhdrcnt;
+ rcd->rcvegr_tid_base = rcd->ctxt * rcd->rcvegrcnt;
+}
+
+static void qib_6120_txchk_change(struct qib_devdata *dd, u32 start,
+ u32 len, u32 avail, struct qib_ctxtdata *rcd)
+{
+}
+
+static void writescratch(struct qib_devdata *dd, u32 val)
+{
+ (void) qib_write_kreg(dd, kr_scratch, val);
+}
+
+static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum)
+{
+ return -ENXIO;
+}
+
+/* Dummy function, as 6120 boards never disable EEPROM Write */
+static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen)
+{
+ return 1;
+}
+
+/**
+ * qib_init_iba6120_funcs - set up the chip-specific function pointers
+ * @pdev: pci_dev of the qlogic_ib device
+ * @ent: pci_device_id matching this chip
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ *
+ * It also allocates/partially-inits the qib_devdata struct for
+ * this device.
+ */
+struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct qib_devdata *dd;
+ int ret;
+
+ dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) +
+ sizeof(struct qib_chip_specific));
+ if (IS_ERR(dd))
+ goto bail;
+
+ dd->f_bringup_serdes = qib_6120_bringup_serdes;
+ dd->f_cleanup = qib_6120_setup_cleanup;
+ dd->f_clear_tids = qib_6120_clear_tids;
+ dd->f_free_irq = qib_6120_free_irq;
+ dd->f_get_base_info = qib_6120_get_base_info;
+ dd->f_get_msgheader = qib_6120_get_msgheader;
+ dd->f_getsendbuf = qib_6120_getsendbuf;
+ dd->f_gpio_mod = gpio_6120_mod;
+ dd->f_eeprom_wen = qib_6120_eeprom_wen;
+ dd->f_hdrqempty = qib_6120_hdrqempty;
+ dd->f_ib_updown = qib_6120_ib_updown;
+ dd->f_init_ctxt = qib_6120_init_ctxt;
+ dd->f_initvl15_bufs = qib_6120_initvl15_bufs;
+ dd->f_intr_fallback = qib_6120_nointr_fallback;
+ dd->f_late_initreg = qib_late_6120_initreg;
+ dd->f_setpbc_control = qib_6120_setpbc_control;
+ dd->f_portcntr = qib_portcntr_6120;
+ dd->f_put_tid = (dd->minrev >= 2) ?
+ qib_6120_put_tid_2 :
+ qib_6120_put_tid;
+ dd->f_quiet_serdes = qib_6120_quiet_serdes;
+ dd->f_rcvctrl = rcvctrl_6120_mod;
+ dd->f_read_cntrs = qib_read_6120cntrs;
+ dd->f_read_portcntrs = qib_read_6120portcntrs;
+ dd->f_reset = qib_6120_setup_reset;
+ dd->f_init_sdma_regs = init_sdma_6120_regs;
+ dd->f_sdma_busy = qib_sdma_6120_busy;
+ dd->f_sdma_gethead = qib_sdma_6120_gethead;
+ dd->f_sdma_sendctrl = qib_6120_sdma_sendctrl;
+ dd->f_sdma_set_desc_cnt = qib_sdma_set_6120_desc_cnt;
+ dd->f_sdma_update_tail = qib_sdma_update_6120_tail;
+ dd->f_sendctrl = sendctrl_6120_mod;
+ dd->f_set_armlaunch = qib_set_6120_armlaunch;
+ dd->f_set_cntr_sample = qib_set_cntr_6120_sample;
+ dd->f_iblink_state = qib_6120_iblink_state;
+ dd->f_ibphys_portstate = qib_6120_phys_portstate;
+ dd->f_get_ib_cfg = qib_6120_get_ib_cfg;
+ dd->f_set_ib_cfg = qib_6120_set_ib_cfg;
+ dd->f_set_ib_loopback = qib_6120_set_loopback;
+ dd->f_set_intr_state = qib_6120_set_intr_state;
+ dd->f_setextled = qib_6120_setup_setextled;
+ dd->f_txchk_change = qib_6120_txchk_change;
+ dd->f_update_usrhead = qib_update_6120_usrhead;
+ dd->f_wantpiobuf_intr = qib_wantpiobuf_6120_intr;
+ dd->f_xgxs_reset = qib_6120_xgxs_reset;
+ dd->f_writescratch = writescratch;
+ dd->f_tempsense_rd = qib_6120_tempsense_rd;
+ /*
+ * Do remaining pcie setup and save pcie values in dd.
+ * Any error printing is already done by the init code.
+ * On return, we have the chip mapped and accessible,
+ * but chip registers are not set up until start of
+ * init_6120_variables.
+ */
+ ret = qib_pcie_ddinit(dd, pdev, ent);
+ if (ret < 0)
+ goto bail_free;
+
+ /* initialize chip-specific variables */
+ ret = init_6120_variables(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ if (qib_mini_init)
+ goto bail;
+
+ if (qib_pcie_params(dd, 8, NULL, NULL))
+ qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
+ "continuing anyway\n");
+ dd->cspec->irq = pdev->irq; /* save IRQ */
+
+ /* clear diagctrl register, in case diags were running and crashed */
+ qib_write_kreg(dd, kr_hwdiagctrl, 0);
+
+ if (qib_read_kreg64(dd, kr_hwerrstatus) &
+ QLOGIC_IB_HWE_SERDESPLLFAILED)
+ qib_write_kreg(dd, kr_hwerrclear,
+ QLOGIC_IB_HWE_SERDESPLLFAILED);
+
+ /* setup interrupt handler (interrupt type handled above) */
+ qib_setup_6120_interrupt(dd);
+ /* Note that qpn_mask is set by qib_6120_config_ctxts() first */
+ qib_6120_init_hwerrors(dd);
+
+ goto bail;
+
+bail_cleanup:
+ qib_pcie_ddcleanup(dd);
+bail_free:
+ qib_free_devdata(dd);
+ dd = ERR_PTR(ret);
+bail:
+ return dd;
+}
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
new file mode 100644
index 000000000000..6fd8d74e7392
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -0,0 +1,4618 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*
+ * This file contains all of the code that is specific to the
+ * QLogic_IB 7220 chip (except that specific to the SerDes)
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <rdma/ib_verbs.h>
+
+#include "qib.h"
+#include "qib_7220.h"
+
+static void qib_setup_7220_setextled(struct qib_pportdata *, u32);
+static void qib_7220_handle_hwerrors(struct qib_devdata *, char *, size_t);
+static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op);
+static u32 qib_7220_iblink_state(u64);
+static u8 qib_7220_phys_portstate(u64);
+static void qib_sdma_update_7220_tail(struct qib_pportdata *, u16);
+static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16);
+
+/*
+ * This file contains almost all the chip-specific register information and
+ * access functions for the QLogic QLogic_IB 7220 PCI-Express chip, with the
+ * exception of SerDes support, which is in qib_sd7220.c.
+ */
+
+/* Below uses machine-generated qib_chipnum_regs.h file */
+#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
+
+/* Use defines to tie machine-generated names to lower-case names */
+#define kr_control KREG_IDX(Control)
+#define kr_counterregbase KREG_IDX(CntrRegBase)
+#define kr_errclear KREG_IDX(ErrClear)
+#define kr_errmask KREG_IDX(ErrMask)
+#define kr_errstatus KREG_IDX(ErrStatus)
+#define kr_extctrl KREG_IDX(EXTCtrl)
+#define kr_extstatus KREG_IDX(EXTStatus)
+#define kr_gpio_clear KREG_IDX(GPIOClear)
+#define kr_gpio_mask KREG_IDX(GPIOMask)
+#define kr_gpio_out KREG_IDX(GPIOOut)
+#define kr_gpio_status KREG_IDX(GPIOStatus)
+#define kr_hrtbt_guid KREG_IDX(HRTBT_GUID)
+#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
+#define kr_hwerrclear KREG_IDX(HwErrClear)
+#define kr_hwerrmask KREG_IDX(HwErrMask)
+#define kr_hwerrstatus KREG_IDX(HwErrStatus)
+#define kr_ibcctrl KREG_IDX(IBCCtrl)
+#define kr_ibcddrctrl KREG_IDX(IBCDDRCtrl)
+#define kr_ibcddrstatus KREG_IDX(IBCDDRStatus)
+#define kr_ibcstatus KREG_IDX(IBCStatus)
+#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
+#define kr_intclear KREG_IDX(IntClear)
+#define kr_intmask KREG_IDX(IntMask)
+#define kr_intstatus KREG_IDX(IntStatus)
+#define kr_ncmodectrl KREG_IDX(IBNCModeCtrl)
+#define kr_palign KREG_IDX(PageAlign)
+#define kr_partitionkey KREG_IDX(RcvPartitionKey)
+#define kr_portcnt KREG_IDX(PortCnt)
+#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
+#define kr_rcvctrl KREG_IDX(RcvCtrl)
+#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
+#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
+#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
+#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
+#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
+#define kr_rcvpktledcnt KREG_IDX(RcvPktLEDCnt)
+#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
+#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
+#define kr_revision KREG_IDX(Revision)
+#define kr_scratch KREG_IDX(Scratch)
+#define kr_sendbuffererror KREG_IDX(SendBufErr0)
+#define kr_sendctrl KREG_IDX(SendCtrl)
+#define kr_senddmabase KREG_IDX(SendDmaBase)
+#define kr_senddmabufmask0 KREG_IDX(SendDmaBufMask0)
+#define kr_senddmabufmask1 (KREG_IDX(SendDmaBufMask0) + 1)
+#define kr_senddmabufmask2 (KREG_IDX(SendDmaBufMask0) + 2)
+#define kr_senddmahead KREG_IDX(SendDmaHead)
+#define kr_senddmaheadaddr KREG_IDX(SendDmaHeadAddr)
+#define kr_senddmalengen KREG_IDX(SendDmaLenGen)
+#define kr_senddmastatus KREG_IDX(SendDmaStatus)
+#define kr_senddmatail KREG_IDX(SendDmaTail)
+#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
+#define kr_sendpiobufbase KREG_IDX(SendBufBase)
+#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
+#define kr_sendpiosize KREG_IDX(SendBufSize)
+#define kr_sendregbase KREG_IDX(SendRegBase)
+#define kr_userregbase KREG_IDX(UserRegBase)
+#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
+
+/* These must only be written via qib_write_kreg_ctxt() */
+#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
+#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
+
+
+#define CREG_IDX(regname) ((QIB_7220_##regname##_OFFS - \
+ QIB_7220_LBIntCnt_OFFS) / sizeof(u64))
+
+#define cr_badformat CREG_IDX(RxVersionErrCnt)
+#define cr_erricrc CREG_IDX(RxICRCErrCnt)
+#define cr_errlink CREG_IDX(RxLinkMalformCnt)
+#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
+#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
+#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlViolCnt)
+#define cr_err_rlen CREG_IDX(RxLenErrCnt)
+#define cr_errslen CREG_IDX(TxLenErrCnt)
+#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
+#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
+#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
+#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
+#define cr_lbint CREG_IDX(LBIntCnt)
+#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
+#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
+#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
+#define cr_pktrcv CREG_IDX(RxDataPktCnt)
+#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
+#define cr_pktsend CREG_IDX(TxDataPktCnt)
+#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
+#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
+#define cr_rcvebp CREG_IDX(RxEBPCnt)
+#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
+#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
+#define cr_sendstall CREG_IDX(TxFlowStallCnt)
+#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
+#define cr_wordrcv CREG_IDX(RxDwordCnt)
+#define cr_wordsend CREG_IDX(TxDwordCnt)
+#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
+#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
+#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
+#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
+#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
+#define cr_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
+#define cr_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
+#define cr_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
+#define cr_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
+#define cr_rxvlerr CREG_IDX(RxVlErrCnt)
+#define cr_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
+#define cr_psstat CREG_IDX(PSStat)
+#define cr_psstart CREG_IDX(PSStart)
+#define cr_psinterval CREG_IDX(PSInterval)
+#define cr_psrcvdatacount CREG_IDX(PSRcvDataCount)
+#define cr_psrcvpktscount CREG_IDX(PSRcvPktsCount)
+#define cr_psxmitdatacount CREG_IDX(PSXmitDataCount)
+#define cr_psxmitpktscount CREG_IDX(PSXmitPktsCount)
+#define cr_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
+#define cr_txsdmadesc CREG_IDX(TxSDmaDescCnt)
+#define cr_pcieretrydiag CREG_IDX(PcieRetryBufDiagQwordCnt)
+
+#define SYM_RMASK(regname, fldname) ((u64) \
+ QIB_7220_##regname##_##fldname##_RMASK)
+#define SYM_MASK(regname, fldname) ((u64) \
+ QIB_7220_##regname##_##fldname##_RMASK << \
+ QIB_7220_##regname##_##fldname##_LSB)
+#define SYM_LSB(regname, fldname) (QIB_7220_##regname##_##fldname##_LSB)
+#define SYM_FIELD(value, regname, fldname) ((u64) \
+ (((value) >> SYM_LSB(regname, fldname)) & \
+ SYM_RMASK(regname, fldname)))
+#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
+#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
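+
+/*
+ * Usage sketch for the accessor macros above (illustrative): SYM_MASK(reg,
+ * fld) gives the in-place bit mask for a field, SYM_LSB() its bit position,
+ * and SYM_FIELD(val, reg, fld) extracts the field from an already-read
+ * register value, e.g. SYM_FIELD(ibcst, IBCStatus, LinkTrainingState) as
+ * used in handle_7220_chase() below.
+ */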
+
+/* ibcctrl bits */
+#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
+/* cycle through TS1/TS2 till OK */
+#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
+/* wait for TS1, then go on */
+#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
+#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
+
+#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
+#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
+#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
+
+#define BLOB_7220_IBCHG 0x81
+
+/*
+ * We could have a single register get/put routine, that takes a group type,
+ * but this is somewhat clearer and cleaner. It also gives us some error
+ * checking. 64 bit register reads should always work, but are inefficient
+ * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
+ * so we use kreg32 wherever possible. User register and counter register
+ * reads are always 32 bit reads, so only one form of those routines.
+ */
+
+/**
+ * qib_read_ureg32 - read 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @ctxt: context number
+ *
+ * Return the contents of a register that is virtualized to be per context.
+ * Returns 0 on error (not distinguishable from valid contents at
+ * runtime; we may add a separate error variable at some point).
+ */
+static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
+ enum qib_ureg regno, int ctxt)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+
+ if (dd->userbase)
+ return readl(regno + (u64 __iomem *)
+ ((char __iomem *)dd->userbase +
+ dd->ureg_align * ctxt));
+ else
+ return readl(regno + (u64 __iomem *)
+ (dd->uregbase +
+ (char __iomem *)dd->kregbase +
+ dd->ureg_align * ctxt));
+}
+
+/**
+ * qib_write_ureg - write 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @value: value
+ * @ctxt: context
+ *
+ * Write the contents of a register that is virtualized to be per context.
+ */
+static inline void qib_write_ureg(const struct qib_devdata *dd,
+ enum qib_ureg regno, u64 value, int ctxt)
+{
+ u64 __iomem *ubase;
+
+ if (dd->userbase)
+ ubase = (u64 __iomem *)
+ ((char __iomem *) dd->userbase +
+ dd->ureg_align * ctxt);
+ else
+ ubase = (u64 __iomem *)
+ (dd->uregbase +
+ (char __iomem *) dd->kregbase +
+ dd->ureg_align * ctxt);
+
+ if (dd->kregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &ubase[regno]);
+}
+
+/**
+ * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
+ * @dd: the qlogic_ib device
+ * @regno: the register number to write
+ * @ctxt: the context containing the register
+ * @value: the value to write
+ */
+static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
+ const u16 regno, unsigned ctxt,
+ u64 value)
+{
+ qib_write_kreg(dd, regno + ctxt, value);
+}
+
+static inline void write_7220_creg(const struct qib_devdata *dd,
+ u16 regno, u64 value)
+{
+ if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &dd->cspec->cregbase[regno]);
+}
+
+static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readq(&dd->cspec->cregbase[regno]);
+}
+
+static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readl(&dd->cspec->cregbase[regno]);
+}
+
+/* kr_revision bits */
+#define QLOGIC_IB_R_EMULATORREV_MASK ((1ULL << 22) - 1)
+#define QLOGIC_IB_R_EMULATORREV_SHIFT 40
+
+/* kr_control bits */
+#define QLOGIC_IB_C_RESET (1U << 7)
+
+/* kr_intstatus, kr_intclear, kr_intmask bits */
+#define QLOGIC_IB_I_RCVURG_MASK ((1ULL << 17) - 1)
+#define QLOGIC_IB_I_RCVURG_SHIFT 32
+#define QLOGIC_IB_I_RCVAVAIL_MASK ((1ULL << 17) - 1)
+#define QLOGIC_IB_I_RCVAVAIL_SHIFT 0
+#define QLOGIC_IB_I_SERDESTRIMDONE (1ULL << 27)
+
+#define QLOGIC_IB_C_FREEZEMODE 0x00000002
+#define QLOGIC_IB_C_LINKENABLE 0x00000004
+
+#define QLOGIC_IB_I_SDMAINT 0x8000000000000000ULL
+#define QLOGIC_IB_I_SDMADISABLED 0x4000000000000000ULL
+#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
+#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
+#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL
+#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL
+
+/* masks for sanity-checking interrupts and errors */
+#define QLOGIC_IB_I_BITSEXTANT \
+ (QLOGIC_IB_I_SDMAINT | QLOGIC_IB_I_SDMADISABLED | \
+ (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
+ (QLOGIC_IB_I_RCVAVAIL_MASK << \
+ QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
+ QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
+ QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO | \
+ QLOGIC_IB_I_SERDESTRIMDONE)
+
+#define IB_HWE_BITSEXTANT \
+ (HWE_MASK(RXEMemParityErr) | \
+ HWE_MASK(TXEMemParityErr) | \
+ (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
+ QLOGIC_IB_HWE_PCIE1PLLFAILED | \
+ QLOGIC_IB_HWE_PCIE0PLLFAILED | \
+ QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
+ QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
+ HWE_MASK(PowerOnBISTFailed) | \
+ QLOGIC_IB_HWE_COREPLL_FBSLIP | \
+ QLOGIC_IB_HWE_COREPLL_RFSLIP | \
+ QLOGIC_IB_HWE_SERDESPLLFAILED | \
+ HWE_MASK(IBCBusToSPCParityErr) | \
+ HWE_MASK(IBCBusFromSPCParityErr) | \
+ QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR | \
+ QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR | \
+ QLOGIC_IB_HWE_SDMAMEMREADERR | \
+ QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED | \
+ QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT | \
+ QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT | \
+ QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT | \
+ QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT | \
+ QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR | \
+ QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR | \
+ QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR | \
+ QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR)
+
+#define IB_E_BITSEXTANT \
+ (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
+ ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
+ ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
+ ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
+ ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
+ ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
+ ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
+ ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
+ ERR_MASK(SendSpecialTriggerErr) | \
+ ERR_MASK(SDmaDisabledErr) | ERR_MASK(SendMinPktLenErr) | \
+ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnderRunErr) | \
+ ERR_MASK(SendPktLenErr) | ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(SendPioArmLaunchErr) | \
+ ERR_MASK(SendUnexpectedPktNumErr) | \
+ ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(SendBufMisuseErr) | \
+ ERR_MASK(SDmaGenMismatchErr) | ERR_MASK(SDmaOutOfBoundErr) | \
+ ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
+ ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
+ ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
+ ERR_MASK(SDmaUnexpDataErr) | \
+ ERR_MASK(IBStatusChanged) | ERR_MASK(InvalidAddrErr) | \
+ ERR_MASK(ResetNegated) | ERR_MASK(HardwareErr) | \
+ ERR_MASK(SDmaDescAddrMisalignErr) | \
+ ERR_MASK(InvalidEEPCmd))
+
+/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
+#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL
+#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
+#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
+#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
+#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
+#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
+#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
+#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
+#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
+/* specific to this chip */
+#define QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL
+#define QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL
+#define QLOGIC_IB_HWE_SDMAMEMREADERR 0x0000000010000000ULL
+#define QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL
+#define QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL
+#define QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL
+#define QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL
+#define QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL
+#define QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL
+#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
+#define QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
+#define QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
+
+#define IBA7220_IBCC_LINKCMD_SHIFT 19
+
+/* kr_ibcddrctrl bits */
+#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL
+#define IBA7220_IBC_DLIDLMC_SHIFT 32
+
+#define IBA7220_IBC_HRTBT_MASK (SYM_RMASK(IBCDDRCtrl, HRTBT_AUTO) | \
+ SYM_RMASK(IBCDDRCtrl, HRTBT_ENB))
+#define IBA7220_IBC_HRTBT_SHIFT SYM_LSB(IBCDDRCtrl, HRTBT_ENB)
+
+#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
+#define IBA7220_IBC_LREV_MASK 1
+#define IBA7220_IBC_LREV_SHIFT 8
+#define IBA7220_IBC_RXPOL_MASK 1
+#define IBA7220_IBC_RXPOL_SHIFT 7
+#define IBA7220_IBC_WIDTH_SHIFT 5
+#define IBA7220_IBC_WIDTH_MASK 0x3
+#define IBA7220_IBC_WIDTH_1X_ONLY (0 << IBA7220_IBC_WIDTH_SHIFT)
+#define IBA7220_IBC_WIDTH_4X_ONLY (1 << IBA7220_IBC_WIDTH_SHIFT)
+#define IBA7220_IBC_WIDTH_AUTONEG (2 << IBA7220_IBC_WIDTH_SHIFT)
+#define IBA7220_IBC_SPEED_AUTONEG (1 << 1)
+#define IBA7220_IBC_SPEED_SDR (1 << 2)
+#define IBA7220_IBC_SPEED_DDR (1 << 3)
+#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7 << 1)
+#define IBA7220_IBC_IBTA_1_2_MASK (1)
+
+/* kr_ibcddrstatus */
+/* link latency shift is 0, don't bother defining */
+#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff
+
+/* kr_extstatus bits */
+#define QLOGIC_IB_EXTS_FREQSEL 0x2
+#define QLOGIC_IB_EXTS_SERDESSEL 0x4
+#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
+#define QLOGIC_IB_EXTS_MEMBIST_DISABLED 0x0000000000008000
+
+/* kr_xgxsconfig bits */
+#define QLOGIC_IB_XGXS_RESET 0x5ULL
+#define QLOGIC_IB_XGXS_FC_SAFE (1ULL << 63)
+
+/* kr_rcvpktledcnt */
+#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
+#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */
+
+#define _QIB_GPIO_SDA_NUM 1
+#define _QIB_GPIO_SCL_NUM 0
+#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7220 cards. */
+#define QIB_TWSI_TEMP_DEV 0x98
+
+/* HW counter clock is at 4nsec */
+#define QIB_7220_PSXMITWAIT_CHECK_RATE 4000
+
+#define IBA7220_R_INTRAVAIL_SHIFT 17
+#define IBA7220_R_PKEY_DIS_SHIFT 34
+#define IBA7220_R_TAILUPD_SHIFT 35
+#define IBA7220_R_CTXTCFG_SHIFT 36
+
+#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
+
+/*
+ * The size bits give us 2^N, in KB units. 0 marks the entry as invalid,
+ * and 7 is reserved. We currently use only 2KB and 4KB.
+ */
+#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
+#define IBA7220_TID_SZ_2K (1UL << IBA7220_TID_SZ_SHIFT) /* 2KB */
+#define IBA7220_TID_SZ_4K (2UL << IBA7220_TID_SZ_SHIFT) /* 4KB */
+#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
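+/*
+ * Rough sketch of how these combine (illustrative only): an expected-TID
+ * entry for a 4KB buffer at physical address pa would look roughly like
+ * (pa >> IBA7220_TID_PA_SHIFT) | IBA7220_TID_SZ_4K, i.e. the address with
+ * its low bits dropped plus the 3-bit size code at bit 37.
+ */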
+#define PBC_7220_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
+#define PBC_7220_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
+
+#define AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
+
+/* packet rate matching delay multiplier */
+static u8 rate_to_delay[2][2] = {
+ /* 1x, 4x */
+ { 8, 2 }, /* SDR */
+ { 4, 1 } /* DDR */
+};
+
+static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
+ [IB_RATE_2_5_GBPS] = 8,
+ [IB_RATE_5_GBPS] = 4,
+ [IB_RATE_10_GBPS] = 2,
+ [IB_RATE_20_GBPS] = 1
+};
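+
+/*
+ * Example reading of the two tables above (illustrative): a 1X SDR link
+ * (2.5 Gb/s) gets the largest multiplier, rate_to_delay[0][0] == 8 ==
+ * ib_rate_to_delay[IB_RATE_2_5_GBPS], while a 4X DDR link (20 Gb/s) needs
+ * no stretching, rate_to_delay[1][1] == 1 == ib_rate_to_delay[IB_RATE_20_GBPS].
+ */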
+
+#define IBA7220_LINKSPEED_SHIFT SYM_LSB(IBCStatus, LinkSpeedActive)
+#define IBA7220_LINKWIDTH_SHIFT SYM_LSB(IBCStatus, LinkWidthActive)
+
+/* link training states, from IBC */
+#define IB_7220_LT_STATE_DISABLED 0x00
+#define IB_7220_LT_STATE_LINKUP 0x01
+#define IB_7220_LT_STATE_POLLACTIVE 0x02
+#define IB_7220_LT_STATE_POLLQUIET 0x03
+#define IB_7220_LT_STATE_SLEEPDELAY 0x04
+#define IB_7220_LT_STATE_SLEEPQUIET 0x05
+#define IB_7220_LT_STATE_CFGDEBOUNCE 0x08
+#define IB_7220_LT_STATE_CFGRCVFCFG 0x09
+#define IB_7220_LT_STATE_CFGWAITRMT 0x0a
+#define IB_7220_LT_STATE_CFGIDLE 0x0b
+#define IB_7220_LT_STATE_RECOVERRETRAIN 0x0c
+#define IB_7220_LT_STATE_RECOVERWAITRMT 0x0e
+#define IB_7220_LT_STATE_RECOVERIDLE 0x0f
+
+/* link state machine states from IBC */
+#define IB_7220_L_STATE_DOWN 0x0
+#define IB_7220_L_STATE_INIT 0x1
+#define IB_7220_L_STATE_ARM 0x2
+#define IB_7220_L_STATE_ACTIVE 0x3
+#define IB_7220_L_STATE_ACT_DEFER 0x4
+
+static const u8 qib_7220_physportstate[0x20] = {
+ [IB_7220_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
+ [IB_7220_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
+ [IB_7220_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
+ [IB_7220_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
+ [IB_7220_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_7220_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_7220_LT_STATE_CFGDEBOUNCE] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7220_LT_STATE_CFGRCVFCFG] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7220_LT_STATE_CFGWAITRMT] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7220_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7220_LT_STATE_RECOVERRETRAIN] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_7220_LT_STATE_RECOVERWAITRMT] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_7220_LT_STATE_RECOVERIDLE] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
+};
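+
+/*
+ * Sketch of how the table above is used (illustrative): the 5-bit IBC link
+ * training state is extracted from the IBCStatus register value, e.g.
+ * (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState), and then used as the
+ * index into qib_7220_physportstate[] to obtain the IB physical port state.
+ */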
+
+int qib_special_trigger;
+module_param_named(special_trigger, qib_special_trigger, int, S_IRUGO);
+MODULE_PARM_DESC(special_trigger, "Enable SpecialTrigger arm/launch");
+
+#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
+#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
+
+#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
+ (1ULL << (SYM_LSB(regname, fldname) + (bit))))
+
+#define TXEMEMPARITYERR_PIOBUF \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
+#define TXEMEMPARITYERR_PIOPBC \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
+#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
+
+#define RXEMEMPARITYERR_RCVBUF \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
+#define RXEMEMPARITYERR_LOOKUPQ \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
+#define RXEMEMPARITYERR_EXPTID \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
+#define RXEMEMPARITYERR_EAGERTID \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
+#define RXEMEMPARITYERR_FLAGBUF \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
+#define RXEMEMPARITYERR_DATAINFO \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
+#define RXEMEMPARITYERR_HDRINFO \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
+
+/* 7220 specific hardware errors... */
+static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = {
+ /* generic hardware errors */
+ QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
+ QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
+
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
+ "TXE PIOBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
+ "TXE PIOPBC Memory Parity"),
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
+ "TXE PIOLAUNCHFIFO Memory Parity"),
+
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
+ "RXE RCVBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
+ "RXE LOOKUPQ Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
+ "RXE EAGERTID Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
+ "RXE EXPTID Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
+ "RXE FLAGBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
+ "RXE DATAINFO Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
+ "RXE HDRINFO Memory Parity"),
+
+ /* chip-specific hardware errors */
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
+ "PCIe Poisoned TLP"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
+ "PCIe completion timeout"),
+ /*
+ * In practice, it's unlikely that we'll see PCIe PLL, or bus
+ * parity or memory parity error failures, because most likely we
+ * won't be able to talk to the core of the chip. Nonetheless, we
+ * might see them, if they are in parts of the PCIe core that aren't
+ * essential.
+ */
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
+ "PCIePLL1"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
+ "PCIePLL0"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
+ "PCIe XTLH core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
+ "PCIe ADM TX core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
+ "PCIe ADM RX core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
+ "SerDes PLL"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR,
+ "PCIe cpl header queue"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR,
+ "PCIe cpl data queue"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SDMAMEMREADERR,
+ "Send DMA memory read"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED,
+ "uC PLL clock not locked"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT,
+ "PCIe serdes Q0 no clock"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT,
+ "PCIe serdes Q1 no clock"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT,
+ "PCIe serdes Q2 no clock"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT,
+ "PCIe serdes Q3 no clock"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR,
+ "DDS RXEQ memory parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR,
+ "IB uC memory parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR,
+ "PCIe uC oct0 memory parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR,
+ "PCIe uC oct1 memory parity"),
+};
+
+#define RXE_PARITY (RXEMEMPARITYERR_EAGERTID|RXEMEMPARITYERR_EXPTID)
+
+#define QLOGIC_IB_E_PKTERRS (\
+ ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(RcvVCRCErr) | \
+ ERR_MASK(RcvICRCErr) | \
+ ERR_MASK(RcvShortPktLenErr) | \
+ ERR_MASK(RcvEBPErr))
+
+/* Convenience for decoding Send DMA errors */
+#define QLOGIC_IB_E_SDMAERRS ( \
+ ERR_MASK(SDmaGenMismatchErr) | \
+ ERR_MASK(SDmaOutOfBoundErr) | \
+ ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
+ ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
+ ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
+ ERR_MASK(SDmaUnexpDataErr) | \
+ ERR_MASK(SDmaDescAddrMisalignErr) | \
+ ERR_MASK(SDmaDisabledErr) | \
+ ERR_MASK(SendBufMisuseErr))
+
+/* These are all rcv-related errors which we want to count for stats */
+#define E_SUM_PKTERRS \
+ (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
+ ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
+ ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
+ ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
+ ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
+
+/* These are all send-related errors which we want to count for stats */
+#define E_SUM_ERRS \
+ (ERR_MASK(SendPioArmLaunchErr) | ERR_MASK(SendUnexpectedPktNumErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
+ ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(InvalidAddrErr))
+
+/*
+ * This is similar to E_SUM_ERRS, but we can't ignore armlaunch errors, and
+ * errors not related to freeze and cancelling buffers are not ignored
+ * either. Armlaunch can't be ignored because more may occur while we are
+ * still cleaning up, and those need to be cancelled as they happen.
+ */
+#define E_SPKT_ERRS_IGNORE \
+ (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
+ ERR_MASK(SendPktLenErr))
+
+/*
+ * These are errors that can occur when the link changes state while
+ * a packet is being sent or received. This doesn't cover things
+ * like EBP or VCRC, which can result from the link changing state
+ * while a packet is being sent, so that we receive a "known bad" packet.
+ */
+#define E_SUM_LINK_PKTERRS \
+ (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvUnexpectedCharErr))
+
+static void autoneg_7220_work(struct work_struct *);
+static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *, u64, u32 *);
+
+/*
+ * Called when we might have an error that is specific to a particular
+ * PIO buffer, and may need to cancel that buffer so it can be re-used;
+ * we don't need to force an update of pioavail here.
+ */
+static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd)
+{
+ unsigned long sbuf[3];
+ struct qib_devdata *dd = ppd->dd;
+
+ /*
+ * It's possible that sendbuffererror could have bits set; we might
+ * already have done this as a result of hardware error handling.
+ */
+ /* read these before writing errorclear */
+ sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
+ sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
+ sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
+
+ if (sbuf[0] || sbuf[1] || sbuf[2])
+ qib_disarm_piobufs_set(dd, sbuf,
+ dd->piobcnt2k + dd->piobcnt4k);
+}
+
+static void qib_7220_txe_recover(struct qib_devdata *dd)
+{
+ qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n");
+ qib_disarm_7220_senderrbufs(dd->pport);
+}
+
+/*
+ * This is called with interrupts disabled and sdma_lock held.
+ */
+static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 set_sendctrl = 0;
+ u64 clr_sendctrl = 0;
+
+ if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
+ set_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
+ set_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_HALT)
+ set_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
+
+ spin_lock(&dd->sendctrl_lock);
+
+ dd->sendctrl |= set_sendctrl;
+ dd->sendctrl &= ~clr_sendctrl;
+
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ spin_unlock(&dd->sendctrl_lock);
+}
+
+static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd,
+ u64 err, char *buf, size_t blen)
+{
+ static const struct {
+ u64 err;
+ const char *msg;
+ } errs[] = {
+ { ERR_MASK(SDmaGenMismatchErr),
+ "SDmaGenMismatch" },
+ { ERR_MASK(SDmaOutOfBoundErr),
+ "SDmaOutOfBound" },
+ { ERR_MASK(SDmaTailOutOfBoundErr),
+ "SDmaTailOutOfBound" },
+ { ERR_MASK(SDmaBaseErr),
+ "SDmaBase" },
+ { ERR_MASK(SDma1stDescErr),
+ "SDma1stDesc" },
+ { ERR_MASK(SDmaRpyTagErr),
+ "SDmaRpyTag" },
+ { ERR_MASK(SDmaDwEnErr),
+ "SDmaDwEn" },
+ { ERR_MASK(SDmaMissingDwErr),
+ "SDmaMissingDw" },
+ { ERR_MASK(SDmaUnexpDataErr),
+ "SDmaUnexpData" },
+ { ERR_MASK(SDmaDescAddrMisalignErr),
+ "SDmaDescAddrMisalign" },
+ { ERR_MASK(SendBufMisuseErr),
+ "SendBufMisuse" },
+ { ERR_MASK(SDmaDisabledErr),
+ "SDmaDisabled" },
+ };
+ int i;
+ size_t bidx = 0;
+
+ for (i = 0; i < ARRAY_SIZE(errs); i++) {
+ if (err & errs[i].err)
+ bidx += scnprintf(buf + bidx, blen - bidx,
+ "%s ", errs[i].msg);
+ }
+}
+
+/*
+ * This is called as part of link-down cleanup: disarm and flush
+ * all send buffers so that SMP packets can be sent.
+ */
+static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd)
+{
+ /* This will trigger the Abort interrupt */
+ sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
+ QIB_SENDCTRL_AVAIL_BLIP);
+ ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
+}
+
+static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd)
+{
+ /*
+ * Write SendDmaLenGen twice: first with the MSB of the generation
+ * count clear, then with it set, to enable generation checking
+ * and load the internal generation counter.
+ */
+ qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt);
+ qib_write_kreg(ppd->dd, kr_senddmalengen,
+ ppd->sdma_descq_cnt |
+ (1ULL << QIB_7220_SendDmaLenGen_Generation_MSB));
+}
+
+static void qib_7220_sdma_hw_start_up(struct qib_pportdata *ppd)
+{
+ qib_sdma_7220_setlengen(ppd);
+ qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
+ ppd->sdma_head_dma[0] = 0;
+}
+
+#define DISABLES_SDMA ( \
+ ERR_MASK(SDmaDisabledErr) | \
+ ERR_MASK(SDmaBaseErr) | \
+ ERR_MASK(SDmaTailOutOfBoundErr) | \
+ ERR_MASK(SDmaOutOfBoundErr) | \
+ ERR_MASK(SDma1stDescErr) | \
+ ERR_MASK(SDmaRpyTagErr) | \
+ ERR_MASK(SDmaGenMismatchErr) | \
+ ERR_MASK(SDmaDescAddrMisalignErr) | \
+ ERR_MASK(SDmaMissingDwErr) | \
+ ERR_MASK(SDmaDwEnErr))
+
+static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
+{
+ unsigned long flags;
+ struct qib_devdata *dd = ppd->dd;
+ char *msg;
+
+ errs &= QLOGIC_IB_E_SDMAERRS;
+
+ msg = dd->cspec->sdmamsgbuf;
+ qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf);
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ if (errs & ERR_MASK(SendBufMisuseErr)) {
+ unsigned long sbuf[3];
+
+ sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
+ sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
+ sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
+
+ qib_dev_err(ppd->dd,
+ "IB%u:%u SendBufMisuse: %04lx %016lx %016lx\n",
+ ppd->dd->unit, ppd->port, sbuf[2], sbuf[1],
+ sbuf[0]);
+ }
+
+ if (errs & ERR_MASK(SDmaUnexpDataErr))
+ qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit,
+ ppd->port);
+
+ switch (ppd->sdma_state.current_state) {
+ case qib_sdma_state_s00_hw_down:
+ /* not expecting any interrupts */
+ break;
+
+ case qib_sdma_state_s10_hw_start_up_wait:
+ /* handled in intr path */
+ break;
+
+ case qib_sdma_state_s20_idle:
+ /* not expecting any interrupts */
+ break;
+
+ case qib_sdma_state_s30_sw_clean_up_wait:
+ /* not expecting any interrupts */
+ break;
+
+ case qib_sdma_state_s40_hw_clean_up_wait:
+ if (errs & ERR_MASK(SDmaDisabledErr))
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e50_hw_cleaned);
+ break;
+
+ case qib_sdma_state_s50_hw_halt_wait:
+ /* handled in intr path */
+ break;
+
+ case qib_sdma_state_s99_running:
+ if (errs & DISABLES_SDMA)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e7220_err_halted);
+ break;
+ }
+
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+/*
+ * Decode the error status into strings, deciding whether to always
+ * print it or not, depending on "normal packet errors" vs everything
+ * else. Return 1 if there are "real" errors, otherwise 0 if only packet
+ * errors, so the caller can decide what to print with the string.
+ */
+static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen,
+ u64 err)
+{
+ int iserr = 1;
+
+ *buf = '\0';
+ if (err & QLOGIC_IB_E_PKTERRS) {
+ if (!(err & ~QLOGIC_IB_E_PKTERRS))
+ iserr = 0;
+ if ((err & ERR_MASK(RcvICRCErr)) &&
+ !(err & (ERR_MASK(RcvVCRCErr) | ERR_MASK(RcvEBPErr))))
+ strlcat(buf, "CRC ", blen);
+ if (!iserr)
+ goto done;
+ }
+ if (err & ERR_MASK(RcvHdrLenErr))
+ strlcat(buf, "rhdrlen ", blen);
+ if (err & ERR_MASK(RcvBadTidErr))
+ strlcat(buf, "rbadtid ", blen);
+ if (err & ERR_MASK(RcvBadVersionErr))
+ strlcat(buf, "rbadversion ", blen);
+ if (err & ERR_MASK(RcvHdrErr))
+ strlcat(buf, "rhdr ", blen);
+ if (err & ERR_MASK(SendSpecialTriggerErr))
+ strlcat(buf, "sendspecialtrigger ", blen);
+ if (err & ERR_MASK(RcvLongPktLenErr))
+ strlcat(buf, "rlongpktlen ", blen);
+ if (err & ERR_MASK(RcvMaxPktLenErr))
+ strlcat(buf, "rmaxpktlen ", blen);
+ if (err & ERR_MASK(RcvMinPktLenErr))
+ strlcat(buf, "rminpktlen ", blen);
+ if (err & ERR_MASK(SendMinPktLenErr))
+ strlcat(buf, "sminpktlen ", blen);
+ if (err & ERR_MASK(RcvFormatErr))
+ strlcat(buf, "rformaterr ", blen);
+ if (err & ERR_MASK(RcvUnsupportedVLErr))
+ strlcat(buf, "runsupvl ", blen);
+ if (err & ERR_MASK(RcvUnexpectedCharErr))
+ strlcat(buf, "runexpchar ", blen);
+ if (err & ERR_MASK(RcvIBFlowErr))
+ strlcat(buf, "ribflow ", blen);
+ if (err & ERR_MASK(SendUnderRunErr))
+ strlcat(buf, "sunderrun ", blen);
+ if (err & ERR_MASK(SendPioArmLaunchErr))
+ strlcat(buf, "spioarmlaunch ", blen);
+ if (err & ERR_MASK(SendUnexpectedPktNumErr))
+ strlcat(buf, "sunexperrpktnum ", blen);
+ if (err & ERR_MASK(SendDroppedSmpPktErr))
+ strlcat(buf, "sdroppedsmppkt ", blen);
+ if (err & ERR_MASK(SendMaxPktLenErr))
+ strlcat(buf, "smaxpktlen ", blen);
+ if (err & ERR_MASK(SendUnsupportedVLErr))
+ strlcat(buf, "sunsupVL ", blen);
+ if (err & ERR_MASK(InvalidAddrErr))
+ strlcat(buf, "invalidaddr ", blen);
+ if (err & ERR_MASK(RcvEgrFullErr))
+ strlcat(buf, "rcvegrfull ", blen);
+ if (err & ERR_MASK(RcvHdrFullErr))
+ strlcat(buf, "rcvhdrfull ", blen);
+ if (err & ERR_MASK(IBStatusChanged))
+ strlcat(buf, "ibcstatuschg ", blen);
+ if (err & ERR_MASK(RcvIBLostLinkErr))
+ strlcat(buf, "riblostlink ", blen);
+ if (err & ERR_MASK(HardwareErr))
+ strlcat(buf, "hardware ", blen);
+ if (err & ERR_MASK(ResetNegated))
+ strlcat(buf, "reset ", blen);
+ if (err & QLOGIC_IB_E_SDMAERRS)
+ qib_decode_7220_sdma_errs(dd->pport, err, buf, blen);
+ if (err & ERR_MASK(InvalidEEPCmd))
+ strlcat(buf, "invalideepromcmd ", blen);
+done:
+ return iserr;
+}
+
+static void reenable_7220_chase(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ ppd->cpspec->chase_timer.expires = 0;
+ qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
+ QLOGIC_IB_IBCC_LINKINITCMD_POLL);
+}
+
+static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
+{
+ u8 ibclt;
+ u64 tnow;
+
+ ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);
+
+ /*
+ * Detect and handle the state chase issue, where we can
+ * get stuck if we are unlucky on timing on both sides of
+ * the link. If we are, we disable, set a timer, and
+ * then re-enable.
+ */
+ switch (ibclt) {
+ case IB_7220_LT_STATE_CFGRCVFCFG:
+ case IB_7220_LT_STATE_CFGWAITRMT:
+ case IB_7220_LT_STATE_TXREVLANES:
+ case IB_7220_LT_STATE_CFGENH:
+ tnow = get_jiffies_64();
+ if (ppd->cpspec->chase_end &&
+ time_after64(tnow, ppd->cpspec->chase_end)) {
+ ppd->cpspec->chase_end = 0;
+ qib_set_ib_7220_lstate(ppd,
+ QLOGIC_IB_IBCC_LINKCMD_DOWN,
+ QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+ ppd->cpspec->chase_timer.expires = jiffies +
+ QIB_CHASE_DIS_TIME;
+ add_timer(&ppd->cpspec->chase_timer);
+ } else if (!ppd->cpspec->chase_end)
+ ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
+ break;
+
+ default:
+ ppd->cpspec->chase_end = 0;
+ break;
+ }
+}
+
+static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
+{
+ char *msg;
+ u64 ignore_this_time = 0;
+ u64 iserr = 0;
+ int log_idx;
+ struct qib_pportdata *ppd = dd->pport;
+ u64 mask;
+
+ /* don't report errors that are masked */
+ errs &= dd->cspec->errormask;
+ msg = dd->cspec->emsgbuf;
+
+ /* do these first, they are most important */
+ if (errs & ERR_MASK(HardwareErr))
+ qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+ else
+ for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+ if (errs & dd->eep_st_masks[log_idx].errs_to_log)
+ qib_inc_eeprom_err(dd, log_idx, 1);
+
+ if (errs & QLOGIC_IB_E_SDMAERRS)
+ sdma_7220_errors(ppd, errs);
+
+ if (errs & ~IB_E_BITSEXTANT)
+ qib_dev_err(dd, "error interrupt with unknown errors "
+ "%llx set\n", (unsigned long long)
+ (errs & ~IB_E_BITSEXTANT));
+
+ if (errs & E_SUM_ERRS) {
+ qib_disarm_7220_senderrbufs(ppd);
+ if ((errs & E_SUM_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when trying to bring the link
+ * up, but the IB link changes state at the "wrong"
+ * time. The IB logic then complains that the packet
+ * isn't valid. We don't want to confuse people, so
+ * we just don't print them, except at debug
+ */
+ ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+ }
+ } else if ((errs & E_SUM_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when SMA is trying to bring the link
+ * up, but the IB link changes state at the "wrong" time.
+ * The IB logic then complains that the packet isn't
+ * valid. We don't want to confuse people, so we just
+ * don't print them, except at debug
+ */
+ ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+ }
+
+ qib_write_kreg(dd, kr_errclear, errs);
+
+ errs &= ~ignore_this_time;
+ if (!errs)
+ goto done;
+
+ /*
+ * The ones we mask off are handled specially below
+ * or above. Also mask SDMADISABLED by default as it
+ * is too chatty.
+ */
+ mask = ERR_MASK(IBStatusChanged) |
+ ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
+ ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
+
+ qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+
+ if (errs & E_SUM_PKTERRS)
+ qib_stats.sps_rcverrs++;
+ if (errs & E_SUM_ERRS)
+ qib_stats.sps_txerrs++;
+ iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS |
+ ERR_MASK(SDmaDisabledErr));
+
+ if (errs & ERR_MASK(IBStatusChanged)) {
+ u64 ibcs;
+
+ ibcs = qib_read_kreg64(dd, kr_ibcstatus);
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ handle_7220_chase(ppd, ibcs);
+
+ /* Update our picture of width and speed from chip */
+ ppd->link_width_active =
+ ((ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1) ?
+ IB_WIDTH_4X : IB_WIDTH_1X;
+ ppd->link_speed_active =
+ ((ibcs >> IBA7220_LINKSPEED_SHIFT) & 1) ?
+ QIB_IB_DDR : QIB_IB_SDR;
+
+ /*
+ * Since going into a recovery state causes the link state
+ * to go down and since recovery is transitory, it is better
+ * if we "miss" ever seeing the link training state go into
+ * recovery (i.e., ignore this transition for link state
+ * special handling purposes) without updating lastibcstat.
+ */
+ if (qib_7220_phys_portstate(ibcs) !=
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
+ qib_handle_e_ibstatuschanged(ppd, ibcs);
+ }
+
+ if (errs & ERR_MASK(ResetNegated)) {
+ qib_dev_err(dd, "Got reset, requires re-init "
+ "(unload and reload driver)\n");
+ dd->flags &= ~QIB_INITTED; /* needs re-init */
+ /* mark as having had error */
+ *dd->devstatusp |= QIB_STATUS_HWERROR;
+ *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
+ }
+
+ if (*msg && iserr)
+ qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
+
+ if (ppd->state_wanted & ppd->lflags)
+ wake_up_interruptible(&ppd->state_wait);
+
+ /*
+ * If there were hdrq or egrfull errors, wake up any processes
+ * waiting in poll. We used to try to check which contexts had
+ * the overflow, but given the cost of that and the chip reads
+ * to support it, it's better to just wake everybody up if we
+ * get an overflow; waiters can poll again if it's not them.
+ */
+ if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
+ qib_handle_urcv(dd, ~0U);
+ if (errs & ERR_MASK(RcvEgrFullErr))
+ qib_stats.sps_buffull++;
+ else
+ qib_stats.sps_hdrfull++;
+ }
+done:
+ return;
+}
+
+/* enable/disable chip from delivering interrupts */
+static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ if (dd->flags & QIB_BADINTR)
+ return;
+ qib_write_kreg(dd, kr_intmask, ~0ULL);
+ /* force re-interrupt of any pending interrupts. */
+ qib_write_kreg(dd, kr_intclear, 0ULL);
+ } else
+ qib_write_kreg(dd, kr_intmask, 0ULL);
+}
+
+/*
+ * Try to cleanup as much as possible for anything that might have gone
+ * wrong while in freeze mode, such as pio buffers being written by user
+ * processes (causing armlaunch), send errors due to going into freeze mode,
+ * etc., and try to avoid causing extra interrupts while doing so.
+ * Forcibly update the in-memory pioavail register copies after cleanup
+ * because the chip won't do it while in freeze mode (the register values
+ * themselves are kept correct).
+ * Make sure that we don't lose any important interrupts by using the chip
+ * feature that says that writing 0 to a bit in *clear that is set in
+ * *status will cause an interrupt to be generated again (if allowed by
+ * the *mask value).
+ * This is in chip-specific code because of all of the register accesses,
+ * even though the details are similar on most chips.
+ */
+static void qib_7220_clear_freeze(struct qib_devdata *dd)
+{
+ /* disable error interrupts, to avoid confusion */
+ qib_write_kreg(dd, kr_errmask, 0ULL);
+
+ /* also disable interrupts; errormask is sometimes overwritten */
+ qib_7220_set_intr_state(dd, 0);
+
+ qib_cancel_sends(dd->pport);
+
+ /* clear the freeze, and be sure chip saw it */
+ qib_write_kreg(dd, kr_control, dd->control);
+ qib_read_kreg32(dd, kr_scratch);
+
+ /* force in-memory update now we are out of freeze */
+ qib_force_pio_avail_update(dd);
+
+ /*
+ * force new interrupt if any hwerr, error or interrupt bits are
+ * still set, and clear "safe" send packet errors related to freeze
+ * and cancelling sends. Re-enable error interrupts before possible
+ * force of re-interrupt on pending interrupts.
+ */
+ qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+ qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+ qib_7220_set_intr_state(dd, 1);
+}
+
+/**
+ * qib_7220_handle_hwerrors - display hardware errors.
+ * @dd: the qlogic_ib device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Most hardware errors are catastrophic, but for right now we'll print
+ * them and continue. We reuse the same message buffer as
+ * handle_7220_errors() to avoid excessive stack usage.
+ */
+static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
+ size_t msgl)
+{
+ u64 hwerrs;
+ u32 bits, ctrl;
+ int isfatal = 0;
+ char *bitsmsg;
+ int log_idx;
+
+ hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+ if (!hwerrs)
+ goto bail;
+ if (hwerrs == ~0ULL) {
+ qib_dev_err(dd, "Read of hardware error status failed "
+ "(all bits set); ignoring\n");
+ goto bail;
+ }
+ qib_stats.sps_hwerrs++;
+
+ /*
+ * Always clear the error status register, except MEMBISTFAIL,
+ * regardless of whether we continue or stop using the chip.
+ * We want that set so we know it failed, even across driver reload.
+ * We'll still ignore it in the hwerrmask. We do this partly for
+ * diagnostics, but also for support.
+ */
+ qib_write_kreg(dd, kr_hwerrclear,
+ hwerrs & ~HWE_MASK(PowerOnBISTFailed));
+
+ hwerrs &= dd->cspec->hwerrmask;
+
+ /* We log some errors to EEPROM, check if we have any of those. */
+ for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+ if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
+ qib_inc_eeprom_err(dd, log_idx, 1);
+ if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
+ RXE_PARITY))
+ qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
+ "(cleared)\n", (unsigned long long) hwerrs);
+
+ if (hwerrs & ~IB_HWE_BITSEXTANT)
+ qib_dev_err(dd, "hwerror interrupt with unknown errors "
+ "%llx set\n", (unsigned long long)
+ (hwerrs & ~IB_HWE_BITSEXTANT));
+
+ if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR)
+ qib_sd7220_clr_ibpar(dd);
+
+ ctrl = qib_read_kreg32(dd, kr_control);
+ if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
+ /*
+ * Parity errors in send memory are recoverable by h/w;
+ * just do housekeeping, exit freeze mode and continue.
+ */
+ if (hwerrs & (TXEMEMPARITYERR_PIOBUF |
+ TXEMEMPARITYERR_PIOPBC)) {
+ qib_7220_txe_recover(dd);
+ hwerrs &= ~(TXEMEMPARITYERR_PIOBUF |
+ TXEMEMPARITYERR_PIOPBC);
+ }
+ if (hwerrs)
+ isfatal = 1;
+ else
+ qib_7220_clear_freeze(dd);
+ }
+
+ *msg = '\0';
+
+ if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
+ isfatal = 1;
+ strlcat(msg, "[Memory BIST test failed, "
+ "InfiniPath hardware unusable]", msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ qib_format_hwerrors(hwerrs, qib_7220_hwerror_msgs,
+ ARRAY_SIZE(qib_7220_hwerror_msgs), msg, msgl);
+
+ bitsmsg = dd->cspec->bitsmsgbuf;
+ if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
+ bits = (u32) ((hwerrs >>
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
+ snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+ "[PCIe Mem Parity Errs %x] ", bits);
+ strlcat(msg, bitsmsg, msgl);
+ }
+
+#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
+ QLOGIC_IB_HWE_COREPLL_RFSLIP)
+
+ if (hwerrs & _QIB_PLL_FAIL) {
+ isfatal = 1;
+ snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+ "[PLL failed (%llx), InfiniPath hardware unusable]",
+ (unsigned long long) hwerrs & _QIB_PLL_FAIL);
+ strlcat(msg, bitsmsg, msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
+ /*
+ * If it occurs, it is left masked since the external
+ * interface is unused.
+ */
+ dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ qib_dev_err(dd, "%s hardware error\n", msg);
+
+ if (isfatal && !dd->diag_client) {
+ qib_dev_err(dd, "Fatal Hardware Error, no longer"
+ " usable, SN %.16s\n", dd->serial);
+ /*
+ * For /sys status file and user programs to print; if no
+ * trailing brace is copied, we'll know it was truncated.
+ */
+ if (dd->freezemsg)
+ snprintf(dd->freezemsg, dd->freezelen,
+ "{%s}", msg);
+ qib_disable_after_error(dd);
+ }
+bail:;
+}
+
+/**
+ * qib_7220_init_hwerrors - enable hardware errors
+ * @dd: the qlogic_ib device
+ *
+ * Now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occurred,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask
+ */
+static void qib_7220_init_hwerrors(struct qib_devdata *dd)
+{
+ u64 val;
+ u64 extsval;
+
+ extsval = qib_read_kreg64(dd, kr_extstatus);
+
+ if (!(extsval & (QLOGIC_IB_EXTS_MEMBIST_ENDTEST |
+ QLOGIC_IB_EXTS_MEMBIST_DISABLED)))
+ qib_dev_err(dd, "MemBIST did not complete!\n");
+ if (extsval & QLOGIC_IB_EXTS_MEMBIST_DISABLED)
+ qib_devinfo(dd->pcidev, "MemBIST is disabled.\n");
+
+ val = ~0ULL; /* default to all hwerrors becoming interrupts */
+
+ val &= ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
+ dd->cspec->hwerrmask = val;
+
+ qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+
+ /* clear all */
+ qib_write_kreg(dd, kr_errclear, ~0ULL);
+ /* enable errors that are masked, at least this first time. */
+ qib_write_kreg(dd, kr_errmask, ~0ULL);
+ dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
+ /* clear any interrupts up to this point (ints still not enabled) */
+ qib_write_kreg(dd, kr_intclear, ~0ULL);
+}
+
+/*
+ * Disable and enable the armlaunch error. Used for PIO bandwidth testing
+ * on chips that are count-based, rather than trigger-based. There is no
+ * reference counting, but that's also fine, given the intended use.
+ * Only chip-specific because it's all register accesses
+ */
+static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr));
+ dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
+ } else
+ dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
+/*
+ * Formerly took parameter <which> in pre-shifted,
+ * pre-merged form with LinkCmd and LinkInitCmd
+ * together, and assuming the zero was NOP.
+ */
+static void qib_set_ib_7220_lstate(struct qib_pportdata *ppd, u16 linkcmd,
+ u16 linitcmd)
+{
+ u64 mod_wd;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
+ /*
+ * If we are told to disable, note that so link-recovery
+ * code does not attempt to bring us back up.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
+ /*
+ * Any other linkinitcmd will lead to LINKDOWN and then
+ * to INIT (if all is well), so clear flag to let
+ * link-recovery code attempt to bring us back up.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
+
+ mod_wd = (linkcmd << IBA7220_IBCC_LINKCMD_SHIFT) |
+ (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+ qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd);
+ /* write to chip to prevent back-to-back writes of ibc reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+}
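+
+/*
+ * Illustrative usage, as seen in reenable_7220_chase() above:
+ *
+ * qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
+ * QLOGIC_IB_IBCC_LINKINITCMD_POLL);
+ *
+ * takes the link down and lets IBC poll (cycle TS1/TS2) until the remote
+ * end responds; a linkcmd of 0, as in qib_7220_quiet_serdes() below, applies
+ * only the linkinitcmd.
+ */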
+
+/*
+ * All detailed interaction with the SerDes has been moved to qib_sd7220.c
+ *
+ * The portion of IBA7220-specific bringup_serdes() that actually deals with
+ * registers and memory within the SerDes itself is qib_sd7220_init().
+ */
+
+/**
+ * qib_7220_bringup_serdes - bring up the serdes
+ * @ppd: physical port on the qlogic_ib device
+ */
+static int qib_7220_bringup_serdes(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 val, prev_val, guid, ibc;
+ int ret = 0;
+
+ /* Put IBC in reset, sends disabled */
+ dd->control &= ~QLOGIC_IB_C_LINKENABLE;
+ qib_write_kreg(dd, kr_control, 0ULL);
+
+ if (qib_compat_ddr_negotiate) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap =
+ read_7220_creg32(dd, cr_iblinkerrrecov);
+ }
+
+ /* flowcontrolwatermark is in units of KBytes */
+ ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
+ /*
+ * How often flowctrl sent. More or less in usecs; balance against
+ * watermark value, so that in theory senders always get a flow
+ * control update in time to not let the IB link go idle.
+ */
+ ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
+ /* max error tolerance */
+ ibc |= 0xfULL << SYM_LSB(IBCCtrl, PhyerrThreshold);
+ /* use "real" buffer space for */
+ ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
+ /* IB credit flow control. */
+ ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
+ /*
+ * set initial max size pkt IBC will send, including ICRC; it's the
+ * PIO buffer size in dwords, less 1; also see qib_set_mtu()
+ */
+ ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
+ ppd->cpspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
+
+ /* initially come up waiting for TS1, without sending anything. */
+ val = ppd->cpspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+ QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+ qib_write_kreg(dd, kr_ibcctrl, val);
+
+ if (!ppd->cpspec->ibcddrctrl) {
+ /* not on re-init after reset */
+ ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl);
+
+ if (ppd->link_speed_enabled == (QIB_IB_SDR | QIB_IB_DDR))
+ ppd->cpspec->ibcddrctrl |=
+ IBA7220_IBC_SPEED_AUTONEG_MASK |
+ IBA7220_IBC_IBTA_1_2_MASK;
+ else
+ ppd->cpspec->ibcddrctrl |=
+ ppd->link_speed_enabled == QIB_IB_DDR ?
+ IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
+ if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
+ (IB_WIDTH_1X | IB_WIDTH_4X))
+ ppd->cpspec->ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
+ else
+ ppd->cpspec->ibcddrctrl |=
+ ppd->link_width_enabled == IB_WIDTH_4X ?
+ IBA7220_IBC_WIDTH_4X_ONLY :
+ IBA7220_IBC_WIDTH_1X_ONLY;
+
+ /* always enable these on driver reload, not sticky */
+ ppd->cpspec->ibcddrctrl |=
+ IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
+ ppd->cpspec->ibcddrctrl |=
+ IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
+
+ /* enable automatic lane reversal detection for receive */
+ ppd->cpspec->ibcddrctrl |= IBA7220_IBC_LANE_REV_SUPPORTED;
+ } else
+ /* write to chip to prevent back-to-back writes of ibc reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ qib_write_kreg(dd, kr_ncmodectrl, 0ULL);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ ret = qib_sd7220_init(dd);
+
+ val = qib_read_kreg64(dd, kr_xgxs_cfg);
+ prev_val = val;
+ val |= QLOGIC_IB_XGXS_FC_SAFE;
+ if (val != prev_val) {
+ qib_write_kreg(dd, kr_xgxs_cfg, val);
+ qib_read_kreg32(dd, kr_scratch);
+ }
+ if (val & QLOGIC_IB_XGXS_RESET)
+ val &= ~QLOGIC_IB_XGXS_RESET;
+ if (val != prev_val)
+ qib_write_kreg(dd, kr_xgxs_cfg, val);
+
+ /* first time through, set port guid */
+ if (!ppd->guid)
+ ppd->guid = dd->base_guid;
+ guid = be64_to_cpu(ppd->guid);
+
+ qib_write_kreg(dd, kr_hrtbt_guid, guid);
+ if (!ret) {
+ dd->control |= QLOGIC_IB_C_LINKENABLE;
+ qib_write_kreg(dd, kr_control, dd->control);
+ } else
+ /* write to chip to prevent back-to-back writes of ibc reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+ return ret;
+}
+
+/**
+ * qib_7220_quiet_serdes - set serdes to txidle
+ * @ppd: physical port of the qlogic_ib device
+ * Called when driver is being unloaded
+ */
+static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
+{
+ u64 val;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ /* disable IBC */
+ dd->control &= ~QLOGIC_IB_C_LINKENABLE;
+ qib_write_kreg(dd, kr_control,
+ dd->control | QLOGIC_IB_C_FREEZEMODE);
+
+ ppd->cpspec->chase_end = 0;
+ if (ppd->cpspec->chase_timer.data) /* if initted */
+ del_timer_sync(&ppd->cpspec->chase_timer);
+
+ if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
+ ppd->cpspec->ibdeltainprog) {
+ u64 diagc;
+
+ /* enable counter writes */
+ diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
+ qib_write_kreg(dd, kr_hwdiagctrl,
+ diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
+
+ if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
+ val = read_7220_creg32(dd, cr_ibsymbolerr);
+ if (ppd->cpspec->ibdeltainprog)
+ val -= val - ppd->cpspec->ibsymsnap;
+ val -= ppd->cpspec->ibsymdelta;
+ write_7220_creg(dd, cr_ibsymbolerr, val);
+ }
+ if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
+ val = read_7220_creg32(dd, cr_iblinkerrrecov);
+ if (ppd->cpspec->ibdeltainprog)
+ val -= val - ppd->cpspec->iblnkerrsnap;
+ val -= ppd->cpspec->iblnkerrdelta;
+ write_7220_creg(dd, cr_iblinkerrrecov, val);
+ }
+
+ /* and disable counter writes */
+ qib_write_kreg(dd, kr_hwdiagctrl, diagc);
+ }
+ qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ wake_up(&ppd->cpspec->autoneg_wait);
+ cancel_delayed_work(&ppd->cpspec->autoneg_work);
+ flush_scheduled_work();
+
+ shutdown_7220_relock_poll(ppd->dd);
+ val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
+ val |= QLOGIC_IB_XGXS_RESET;
+ qib_write_kreg(ppd->dd, kr_xgxs_cfg, val);
+}
+
+/**
+ * qib_setup_7220_setextled - set the state of the two external LEDs
+ * @dd: the qlogic_ib device
+ * @on: whether the link is up or not
+ *
+ * The exact combo of LEDs if on is true is determined by looking
+ * at the ibcstatus.
+ *
+ * These LEDs indicate the physical and logical state of IB link.
+ * For this chip (at least with recommended board pinouts), LED1
+ * is Yellow (logical state) and LED2 is Green (physical state).
+ *
+ * Note: We try to match the Mellanox HCA LED behavior as best
+ * we can. Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate. That's
+ * visible overhead, so not something we will do.
+ *
+ */
+static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 extctl, ledblink = 0, val, lst, ltst;
+ unsigned long flags;
+
+ /*
+ * The diags use the LED to indicate diag info, so we leave
+ * the external LED alone when the diags are running.
+ */
+ if (dd->diag_client)
+ return;
+
+ if (ppd->led_override) {
+ ltst = (ppd->led_override & QIB_LED_PHYS) ?
+ IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED;
+ lst = (ppd->led_override & QIB_LED_LOG) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+ } else if (on) {
+ val = qib_read_kreg64(dd, kr_ibcstatus);
+ ltst = qib_7220_phys_portstate(val);
+ lst = qib_7220_iblink_state(val);
+ } else {
+ ltst = 0;
+ lst = 0;
+ }
+
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
+ SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
+ if (ltst == IB_PHYSPORTSTATE_LINKUP) {
+ extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
+ /*
+ * counts are in chip clock (4ns) periods.
+ * This is 1/16 sec (66.6ms) on,
+ * 3/16 sec (187.5 ms) off, with packets rcvd
+ */
+ ledblink = ((66600 * 1000UL / 4) << IBA7220_LEDBLINK_ON_SHIFT)
+ | ((187500 * 1000UL / 4) << IBA7220_LEDBLINK_OFF_SHIFT);
+ }
+ if (lst == IB_PORT_ACTIVE)
+ extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
+ dd->cspec->extctrl = extctl;
+ qib_write_kreg(dd, kr_extctrl, extctl);
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+
+ if (ledblink) /* blink the LED on packet receive */
+ qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
+}
+
+static void qib_7220_free_irq(struct qib_devdata *dd)
+{
+ if (dd->cspec->irq) {
+ free_irq(dd->cspec->irq, dd);
+ dd->cspec->irq = 0;
+ }
+ qib_nomsi(dd);
+}
+
+/*
+ * qib_setup_7220_cleanup - clean up any chip-specific stuff
+ * @dd: the qlogic_ib device
+ *
+ * This is called during driver unload.
+ *
+ */
+static void qib_setup_7220_cleanup(struct qib_devdata *dd)
+{
+ qib_7220_free_irq(dd);
+ kfree(dd->cspec->cntrs);
+ kfree(dd->cspec->portcntrs);
+}
+
+/*
+ * This is only called for SDmaInt.
+ * SDmaDisabled is handled on the error path.
+ */
+static void sdma_7220_intr(struct qib_pportdata *ppd, u64 istat)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ switch (ppd->sdma_state.current_state) {
+ case qib_sdma_state_s00_hw_down:
+ break;
+
+ case qib_sdma_state_s10_hw_start_up_wait:
+ __qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
+ break;
+
+ case qib_sdma_state_s20_idle:
+ break;
+
+ case qib_sdma_state_s30_sw_clean_up_wait:
+ break;
+
+ case qib_sdma_state_s40_hw_clean_up_wait:
+ break;
+
+ case qib_sdma_state_s50_hw_halt_wait:
+ __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
+ break;
+
+ case qib_sdma_state_s99_running:
+ /* too chatty to print here */
+ __qib_sdma_intr(ppd);
+ break;
+ }
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (needint) {
+ if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
+ goto done;
+ /*
+ * blip the availupd off, next write will be on, so
+ * we ensure an avail update, regardless of threshold or
+ * buffers becoming free, whenever we want an interrupt
+ */
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl &
+ ~SYM_MASK(SendCtrl, SendBufAvailUpd));
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
+ } else
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+done:
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+}
+
+/*
+ * Handle errors and unusual events first, separate function
+ * to improve cache hits for fast path interrupt handling.
+ */
+static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat)
+{
+ if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
+ qib_dev_err(dd,
+ "interrupt with unknown interrupts %Lx set\n",
+ istat & ~QLOGIC_IB_I_BITSEXTANT);
+
+ if (istat & QLOGIC_IB_I_GPIO) {
+ u32 gpiostatus;
+
+ /*
+ * Boards for this chip currently don't use GPIO interrupts,
+ * so clear by writing GPIOstatus to GPIOclear, and complain
+ * to alert developer. To avoid endless repeats, clear
+ * the bits in the mask, since there is some kind of
+ * programming error or chip problem.
+ */
+ gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
+ /*
+ * In theory, writing GPIOstatus to GPIOclear could
+ * have a bad side-effect on some diagnostic that wanted
+ * to poll for a status-change, but the various shadows
+ * make that problematic at best. Diags will just suppress
+ * all GPIO interrupts during such tests.
+ */
+ qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
+
+ if (gpiostatus) {
+ const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
+ u32 gpio_irq = mask & gpiostatus;
+
+ /*
+ * A bit set in status and (chip) Mask register
+ * would cause an interrupt. Since we are not
+ * expecting any, report it. Also check that the
+ * chip reflects our shadow, report issues,
+ * and refresh from the shadow.
+ */
+ /*
+ * Clear any troublemakers, and update chip
+ * from shadow
+ */
+ dd->cspec->gpio_mask &= ~gpio_irq;
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ }
+ }
+
+ if (istat & QLOGIC_IB_I_ERROR) {
+ u64 estat;
+
+ qib_stats.sps_errints++;
+ estat = qib_read_kreg64(dd, kr_errstatus);
+ if (!estat)
+ qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
+ "but no error bits set!\n", istat);
+ else
+ handle_7220_errors(dd, estat);
+ }
+}
+
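+/*
+ * General interrupt handler for the 7220.  Unusual conditions (errors,
+ * GPIO) are handed to the slow path, then kernel receive contexts, SDMA
+ * and PIO-buffer-available interrupts are serviced in turn.
+ */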
+static irqreturn_t qib_7220intr(int irq, void *data)
+{
+ struct qib_devdata *dd = data;
+ irqreturn_t ret;
+ u64 istat;
+ u64 ctxtrbits;
+ u64 rmask;
+ unsigned i;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ ret = IRQ_HANDLED;
+ goto bail;
+ }
+
+ istat = qib_read_kreg64(dd, kr_intstatus);
+
+ if (unlikely(!istat)) {
+ ret = IRQ_NONE; /* not our interrupt, or already handled */
+ goto bail;
+ }
+ if (unlikely(istat == -1)) {
+ qib_bad_intrstatus(dd);
+ /* don't know if it was our interrupt or not */
+ ret = IRQ_NONE;
+ goto bail;
+ }
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
+ QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
+ unlikely_7220_intr(dd, istat);
+
+ /*
+ * Clear the interrupt bits we found set, relatively early, so we
+ * "know" know the chip will have seen this by the time we process
+ * the queue, and will re-interrupt if necessary. The processor
+ * itself won't take the interrupt again until we return.
+ */
+ qib_write_kreg(dd, kr_intclear, istat);
+
+ /*
+ * Handle kernel receive queues before checking for pio buffers
+ * available since receives can overflow; piobuf waiters can afford
+ * a few extra cycles, since they were waiting anyway.
+ */
+ ctxtrbits = istat &
+ ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
+ if (ctxtrbits) {
+ rmask = (1ULL << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (1ULL << QLOGIC_IB_I_RCVURG_SHIFT);
+ for (i = 0; i < dd->first_user_ctxt; i++) {
+ if (ctxtrbits & rmask) {
+ ctxtrbits &= ~rmask;
+ qib_kreceive(dd->rcd[i], NULL, NULL);
+ }
+ rmask <<= 1;
+ }
+ if (ctxtrbits) {
+ ctxtrbits =
+ (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
+ qib_handle_urcv(dd, ctxtrbits);
+ }
+ }
+
+ /* only call for SDmaInt */
+ if (istat & QLOGIC_IB_I_SDMAINT)
+ sdma_7220_intr(dd->pport, istat);
+
+ if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
+ qib_ib_piobufavail(dd);
+
+ ret = IRQ_HANDLED;
+bail:
+ return ret;
+}
+
+/*
+ * Set up our chip-specific interrupt handler.
+ * The interrupt type has already been set up, so
+ * we just need to do the registration and error checking.
+ * If we are using MSI interrupts, we may fall back to
+ * INTx later, if the interrupt handler doesn't get called
+ * within 1/2 second (see verify_interrupt()).
+ */
+static void qib_setup_7220_interrupt(struct qib_devdata *dd)
+{
+ if (!dd->cspec->irq)
+ qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
+ "work\n");
+ else {
+ int ret = request_irq(dd->cspec->irq, qib_7220intr,
+ dd->msi_lo ? 0 : IRQF_SHARED,
+ QIB_DRV_NAME, dd);
+
+ if (ret)
+ qib_dev_err(dd, "Couldn't setup %s interrupt "
+ "(irq=%d): %d\n", dd->msi_lo ?
+ "MSI" : "INTx", dd->cspec->irq, ret);
+ }
+}
+
+/**
+ * qib_7220_boardname - fill in the board name
+ * @dd: the qlogic_ib device
+ *
+ * info is based on the board revision register
+ */
+static void qib_7220_boardname(struct qib_devdata *dd)
+{
+ char *n;
+ u32 boardid, namelen;
+
+ boardid = SYM_FIELD(dd->revision, Revision,
+ BoardID);
+
+ switch (boardid) {
+ case 1:
+ n = "InfiniPath_QLE7240";
+ break;
+ case 2:
+ n = "InfiniPath_QLE7280";
+ break;
+ default:
+ qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid);
+ n = "Unknown_InfiniPath_7220";
+ break;
+ }
+
+ namelen = strlen(n) + 1;
+ dd->boardname = kmalloc(namelen, GFP_KERNEL);
+ if (!dd->boardname)
+ qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
+ else
+ snprintf(dd->boardname, namelen, "%s", n);
+
+ if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
+ qib_dev_err(dd, "Unsupported InfiniPath hardware "
+ "revision %u.%u!\n",
+ dd->majrev, dd->minrev);
+
+ snprintf(dd->boardversion, sizeof(dd->boardversion),
+ "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
+ QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
+ dd->majrev, dd->minrev,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
+}
+
+/*
+ * This routine sleeps, so it can only be called from user context, not
+ * from interrupt context.
+ */
+static int qib_setup_7220_reset(struct qib_devdata *dd)
+{
+ u64 val;
+ int i;
+ int ret;
+ u16 cmdval;
+ u8 int_line, clinesz;
+ unsigned long flags;
+
+ qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
+
+ /* Use dev_err so it shows up in logs, etc. */
+ qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
+
+ /* no interrupts till re-initted */
+ qib_7220_set_intr_state(dd, 0);
+
+ dd->pport->cpspec->ibdeltainprog = 0;
+ dd->pport->cpspec->ibsymdelta = 0;
+ dd->pport->cpspec->iblnkerrdelta = 0;
+
+ /*
+ * Keep chip from being accessed until we are ready. Use
+ * writeq() directly, to allow the write even though QIB_PRESENT
+ * isn't set.
+ */
+ dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
+ dd->int_counter = 0; /* so we check interrupts work again */
+ val = dd->control | QLOGIC_IB_C_RESET;
+ writeq(val, &dd->kregbase[kr_control]);
+ mb(); /* prevent compiler reordering around actual reset */
+
+ for (i = 1; i <= 5; i++) {
+ /*
+ * Allow MBIST, etc. to complete; longer on each retry.
+ * We sometimes get machine checks from bus timeout if no
+ * response, so for now, make it *really* long.
+ */
+ msleep(1000 + (1 + i) * 2000);
+
+ qib_pcie_reenable(dd, cmdval, int_line, clinesz);
+
+ /*
+ * Use readq directly, so we don't need to mark it as PRESENT
+ * until we get a successful indication that all is well.
+ */
+ val = readq(&dd->kregbase[kr_revision]);
+ if (val == dd->revision) {
+ dd->flags |= QIB_PRESENT; /* it's back */
+ ret = qib_reinit_intr(dd);
+ goto bail;
+ }
+ }
+ ret = 0; /* failed */
+
+bail:
+ if (ret) {
+ if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
+ qib_dev_err(dd, "Reset failed to setup PCIe or "
+ "interrupts; continuing anyway\n");
+
+ /* hold IBC in reset, no sends, etc till later */
+ qib_write_kreg(dd, kr_control, 0ULL);
+
+ /* clear the reset error, init error/hwerror mask */
+ qib_7220_init_hwerrors(dd);
+
+ /* do setup similar to speed or link-width changes */
+ if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK)
+ dd->cspec->presets_needed = 1;
+ spin_lock_irqsave(&dd->pport->lflags_lock, flags);
+ dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY;
+ dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&dd->pport->lflags_lock, flags);
+ }
+
+ return ret;
+}
+
+/**
+ * qib_7220_put_tid - write a TID to the chip
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED
+ * @pa: physical address of in memory buffer; tidinvalid if freeing
+ */
+static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
+ u32 type, unsigned long pa)
+{
+ if (pa != dd->tidinvalid) {
+ u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
+
+ /* paranoia checks */
+ if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
+ qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+ pa);
+ return;
+ }
+ if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
+ qib_dev_err(dd, "Physical page address 0x%lx "
+ "larger than supported\n", pa);
+ return;
+ }
+
+ if (type == RCVHQ_RCV_TYPE_EAGER)
+ chippa |= dd->tidtemplate;
+ else /* for now, always full 4KB page */
+ chippa |= IBA7220_TID_SZ_4K;
+ pa = chippa;
+ }
+ writeq(pa, tidptr);
+ mmiowb();
+}
+
+/**
+ * qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
+ * @dd: the qlogic_ib device
+ * @rcd: the context data
+ *
+ * clear all TID entries for a ctxt, expected and eager.
+ * Used from qib_close(). On this chip, TIDs are only 32 bits,
+ * not 64, but they are still on 64 bit boundaries, so tidbase
+ * is declared as u64 * for the pointer math, even though we write 32 bits
+ */
+static void qib_7220_clear_tids(struct qib_devdata *dd,
+ struct qib_ctxtdata *rcd)
+{
+ u64 __iomem *tidbase;
+ unsigned long tidinv;
+ u32 ctxt;
+ int i;
+
+ if (!dd->kregbase || !rcd)
+ return;
+
+ ctxt = rcd->ctxt;
+
+ tidinv = dd->tidinvalid;
+ tidbase = (u64 __iomem *)
+ ((char __iomem *)(dd->kregbase) +
+ dd->rcvtidbase +
+ ctxt * dd->rcvtidcnt * sizeof(*tidbase));
+
+ for (i = 0; i < dd->rcvtidcnt; i++)
+ qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+ tidinv);
+
+ tidbase = (u64 __iomem *)
+ ((char __iomem *)(dd->kregbase) +
+ dd->rcvegrbase +
+ rcd->rcvegr_tid_base * sizeof(*tidbase));
+
+ for (i = 0; i < rcd->rcvegrcnt; i++)
+ qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
+ tidinv);
+}
+
+/**
+ * qib_7220_tidtemplate - setup constants for TID updates
+ * @dd: the qlogic_ib device
+ *
+ * We set up values that are used often, to avoid recalculating them each time
+ */
+static void qib_7220_tidtemplate(struct qib_devdata *dd)
+{
+ if (dd->rcvegrbufsize == 2048)
+ dd->tidtemplate = IBA7220_TID_SZ_2K;
+ else if (dd->rcvegrbufsize == 4096)
+ dd->tidtemplate = IBA7220_TID_SZ_4K;
+ dd->tidinvalid = 0;
+}
+
+/**
+ * qib_init_7220_get_base_info - set chip-specific flags for user code
+ * @rcd: the qlogic_ib ctxt
+ * @kinfo: qib_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */
+static int qib_7220_get_base_info(struct qib_ctxtdata *rcd,
+ struct qib_base_info *kinfo)
+{
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
+ QIB_RUNTIME_NODMA_RTAIL | QIB_RUNTIME_SDMA;
+
+ if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
+
+ return 0;
+}
+
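+/*
+ * Return a pointer to the packet's message header, located relative to
+ * the RHF address using the offset encoded in the RHF entry.
+ */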
+static struct qib_message_header *
+qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
+{
+ u32 offset = qib_hdrget_offset(rhf_addr);
+
+ return (struct qib_message_header *)
+ (rhf_addr - dd->rhf_offset + offset);
+}
+
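+/*
+ * Decide how many receive contexts to use, based on the number of kernel
+ * receive queues, online CPUs and the qib_cfgctxts setting, then program
+ * RcvCtrl for the matching 5/9/17-context configuration.
+ */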
+static void qib_7220_config_ctxts(struct qib_devdata *dd)
+{
+ unsigned long flags;
+ u32 nchipctxts;
+
+ nchipctxts = qib_read_kreg32(dd, kr_portcnt);
+ dd->cspec->numctxts = nchipctxts;
+ if (qib_n_krcv_queues > 1) {
+ dd->qpn_mask = 0x3f;
+ dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
+ if (dd->first_user_ctxt > nchipctxts)
+ dd->first_user_ctxt = nchipctxts;
+ } else
+ dd->first_user_ctxt = dd->num_pports;
+ dd->n_krcv_queues = dd->first_user_ctxt;
+
+ if (!qib_cfgctxts) {
+ int nctxts = dd->first_user_ctxt + num_online_cpus();
+
+ if (nctxts <= 5)
+ dd->ctxtcnt = 5;
+ else if (nctxts <= 9)
+ dd->ctxtcnt = 9;
+ else if (nctxts <= nchipctxts)
+ dd->ctxtcnt = nchipctxts;
+ } else if (qib_cfgctxts <= nchipctxts)
+ dd->ctxtcnt = qib_cfgctxts;
+ if (!dd->ctxtcnt) /* none of the above, set to max */
+ dd->ctxtcnt = nchipctxts;
+
+ /*
+ * Chip can be configured for 5, 9, or 17 ctxts, and choice
+ * affects number of eager TIDs per ctxt (1K, 2K, 4K).
+ * Lock to be paranoid about later motion, etc.
+ */
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+ if (dd->ctxtcnt > 9)
+ dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT;
+ else if (dd->ctxtcnt > 5)
+ dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT;
+ /* else configure for default 5 receive ctxts */
+ if (dd->qpn_mask)
+ dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB;
+ qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+ /* kr_rcvegrcnt changes based on the number of contexts enabled */
+ dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
+ dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT);
+}
+
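+/*
+ * Return the current value of an IB configuration item, either from the
+ * software shadows or by extracting the relevant field from the IBC DDR
+ * control shadow.
+ */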
+static int qib_7220_get_ib_cfg(struct qib_pportdata *ppd, int which)
+{
+ int lsb, ret = 0;
+ u64 maskr; /* right-justified mask */
+
+ switch (which) {
+ case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
+ ret = ppd->link_width_enabled;
+ goto done;
+
+ case QIB_IB_CFG_LWID: /* Get currently active Link-width */
+ ret = ppd->link_width_active;
+ goto done;
+
+ case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
+ ret = ppd->link_speed_enabled;
+ goto done;
+
+ case QIB_IB_CFG_SPD: /* Get current Link spd */
+ ret = ppd->link_speed_active;
+ goto done;
+
+ case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
+ lsb = IBA7220_IBC_RXPOL_SHIFT;
+ maskr = IBA7220_IBC_RXPOL_MASK;
+ break;
+
+ case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
+ lsb = IBA7220_IBC_LREV_SHIFT;
+ maskr = IBA7220_IBC_LREV_MASK;
+ break;
+
+ case QIB_IB_CFG_LINKLATENCY:
+ ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus)
+ & IBA7220_DDRSTAT_LINKLAT_MASK;
+ goto done;
+
+ case QIB_IB_CFG_OP_VLS:
+ ret = ppd->vls_operational;
+ goto done;
+
+ case QIB_IB_CFG_VL_HIGH_CAP:
+ ret = 0;
+ goto done;
+
+ case QIB_IB_CFG_VL_LOW_CAP:
+ ret = 0;
+ goto done;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
+ OverrunThreshold);
+ goto done;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
+ PhyerrThreshold);
+ goto done;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ ret = (ppd->cpspec->ibcctrl &
+ SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
+ IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
+ goto done;
+
+ case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
+ lsb = IBA7220_IBC_HRTBT_SHIFT;
+ maskr = IBA7220_IBC_HRTBT_MASK;
+ break;
+
+ case QIB_IB_CFG_PMA_TICKS:
+ /*
+ * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
+ * Since the clock is always 250MHz, the value is 1 or 0.
+ */
+ ret = (ppd->link_speed_active == QIB_IB_DDR);
+ goto done;
+
+ default:
+ ret = -EINVAL;
+ goto done;
+ }
+ ret = (int)((ppd->cpspec->ibcddrctrl >> lsb) & maskr);
+done:
+ return ret;
+}
+
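+/*
+ * Set an IB configuration item.  Most items are a read-modify-write of a
+ * field in the IBC DDR control shadow; link state, thresholds, pkeys and
+ * MTU are handled as special cases.
+ */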
+static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 maskr; /* right-justified mask */
+ int lsb, ret = 0, setforce = 0;
+ u16 lcmd, licmd;
+ unsigned long flags;
+
+ switch (which) {
+ case QIB_IB_CFG_LIDLMC:
+ /*
+ * Set LID and LMC. Combined to avoid a possible hazard; the
+ * caller puts LMC in the 16 MSbits, DLID in the 16 LSbits of val.
+ */
+ lsb = IBA7220_IBC_DLIDLMC_SHIFT;
+ maskr = IBA7220_IBC_DLIDLMC_MASK;
+ break;
+
+ case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
+ /*
+ * As with speed, only write the actual register if
+ * the link is currently down, otherwise takes effect
+ * on next link change.
+ */
+ ppd->link_width_enabled = val;
+ if (!(ppd->lflags & QIBL_LINKDOWN))
+ goto bail;
+ /*
+ * We set the QIBL_IB_FORCE_NOTIFY bit so updown
+ * will get called because we want to update
+ * link_width_active, and the change may not take
+ * effect for some time (if we are in POLL), so this
+ * flag will force the updown routine to be called
+ * on the next ibstatuschange down interrupt, even
+ * if it's not a down->up transition.
+ */
+ val--; /* convert from IB to chip */
+ maskr = IBA7220_IBC_WIDTH_MASK;
+ lsb = IBA7220_IBC_WIDTH_SHIFT;
+ setforce = 1;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ break;
+
+ case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
+ /*
+ * If we turn off IB1.2, need to preset SerDes defaults,
+ * but not right now. Set a flag for the next time
+ * we command the link down. As with width, only write the
+ * actual register if the link is currently down, otherwise
+ * takes effect on next link change. Since setting is being
+ * explicitly requested (via MAD or sysfs), clear autoneg
+ * failure status if speed autoneg is enabled.
+ */
+ ppd->link_speed_enabled = val;
+ if ((ppd->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) &&
+ !(val & (val - 1)))
+ dd->cspec->presets_needed = 1;
+ if (!(ppd->lflags & QIBL_LINKDOWN))
+ goto bail;
+ /*
+ * We set the QIBL_IB_FORCE_NOTIFY bit so updown
+ * will get called because we want to update
+ * link_speed_active, and the change may not take
+ * effect for some time (if we are in POLL), so this
+ * flag will force the updown routine to be called
+ * on the next ibstatuschange down interrupt, even
+ * if it's not a down->up transition.
+ */
+ if (val == (QIB_IB_SDR | QIB_IB_DDR)) {
+ val = IBA7220_IBC_SPEED_AUTONEG_MASK |
+ IBA7220_IBC_IBTA_1_2_MASK;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ } else
+ val = val == QIB_IB_DDR ?
+ IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
+ maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
+ IBA7220_IBC_IBTA_1_2_MASK;
+ /* IBTA 1.2 mode + speed bits are contiguous */
+ lsb = SYM_LSB(IBCDDRCtrl, IB_ENHANCED_MODE);
+ setforce = 1;
+ break;
+
+ case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
+ lsb = IBA7220_IBC_RXPOL_SHIFT;
+ maskr = IBA7220_IBC_RXPOL_MASK;
+ break;
+
+ case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
+ lsb = IBA7220_IBC_LREV_SHIFT;
+ maskr = IBA7220_IBC_LREV_MASK;
+ break;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
+ OverrunThreshold);
+ if (maskr != val) {
+ ppd->cpspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, OverrunThreshold);
+ ppd->cpspec->ibcctrl |= (u64) val <<
+ SYM_LSB(IBCCtrl, OverrunThreshold);
+ qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ goto bail;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
+ PhyerrThreshold);
+ if (maskr != val) {
+ ppd->cpspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, PhyerrThreshold);
+ ppd->cpspec->ibcctrl |= (u64) val <<
+ SYM_LSB(IBCCtrl, PhyerrThreshold);
+ qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ goto bail;
+
+ case QIB_IB_CFG_PKEYS: /* update pkeys */
+ maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
+ ((u64) ppd->pkeys[2] << 32) |
+ ((u64) ppd->pkeys[3] << 48);
+ qib_write_kreg(dd, kr_partitionkey, maskr);
+ goto bail;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ if (val == IB_LINKINITCMD_POLL)
+ ppd->cpspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
+ else /* SLEEP */
+ ppd->cpspec->ibcctrl |=
+ SYM_MASK(IBCCtrl, LinkDownDefaultState);
+ qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ goto bail;
+
+ case QIB_IB_CFG_MTU: /* update the MTU in IBC */
+ /*
+ * Update our housekeeping variables, and set IBC max
+ * size, same as init code; max IBC is max we allow in
+ * buffer, less the qword pbc, plus 1 for ICRC, in dwords
+ * Set even if it's unchanged, print debug message only
+ * on changes.
+ */
+ val = (ppd->ibmaxlen >> 2) + 1;
+ ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
+ ppd->cpspec->ibcctrl |= (u64)val << SYM_LSB(IBCCtrl, MaxPktLen);
+ qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ goto bail;
+
+ case QIB_IB_CFG_LSTATE: /* set the IB link state */
+ switch (val & 0xffff0000) {
+ case IB_LINKCMD_DOWN:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
+ if (!ppd->cpspec->ibdeltainprog &&
+ qib_compat_ddr_negotiate) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap =
+ read_7220_creg32(dd, cr_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap =
+ read_7220_creg32(dd, cr_iblinkerrrecov);
+ }
+ break;
+
+ case IB_LINKCMD_ARMED:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
+ break;
+
+ case IB_LINKCMD_ACTIVE:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
+ goto bail;
+ }
+ switch (val & 0xffff) {
+ case IB_LINKINITCMD_NOP:
+ licmd = 0;
+ break;
+
+ case IB_LINKINITCMD_POLL:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
+ break;
+
+ case IB_LINKINITCMD_SLEEP:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
+ break;
+
+ case IB_LINKINITCMD_DISABLE:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
+ ppd->cpspec->chase_end = 0;
+ /*
+ * Stop the state chase counter and timer, if running;
+ * wait for the pending timer, but don't clear .data (ppd)!
+ */
+ if (ppd->cpspec->chase_timer.expires) {
+ del_timer_sync(&ppd->cpspec->chase_timer);
+ ppd->cpspec->chase_timer.expires = 0;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
+ val & 0xffff);
+ goto bail;
+ }
+ qib_set_ib_7220_lstate(ppd, lcmd, licmd);
+ goto bail;
+
+ case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
+ if (val > IBA7220_IBC_HRTBT_MASK) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ lsb = IBA7220_IBC_HRTBT_SHIFT;
+ maskr = IBA7220_IBC_HRTBT_MASK;
+ break;
+
+ default:
+ ret = -EINVAL;
+ goto bail;
+ }
+ ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
+ ppd->cpspec->ibcddrctrl |= (((u64) val & maskr) << lsb);
+ qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ if (setforce) {
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
+bail:
+ return ret;
+}
+
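+/*
+ * Enable ("ibc") or disable ("off") IBC loopback.  The heartbeat is
+ * turned off while in loopback so the link can come up, and re-enabled
+ * when loopback is turned off again.
+ */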
+static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
+{
+ int ret = 0;
+ u64 val, ddr;
+
+ if (!strncmp(what, "ibc", 3)) {
+ ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
+ val = 0; /* disable heart beat, so link will come up */
+ qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
+ ppd->dd->unit, ppd->port);
+ } else if (!strncmp(what, "off", 3)) {
+ ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
+ /* enable heart beat again */
+ val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
+ qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
+ "(normal)\n", ppd->dd->unit, ppd->port);
+ } else
+ ret = -EINVAL;
+ if (!ret) {
+ qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+ ddr = ppd->cpspec->ibcddrctrl & ~(IBA7220_IBC_HRTBT_MASK
+ << IBA7220_IBC_HRTBT_SHIFT);
+ ppd->cpspec->ibcddrctrl = ddr | val;
+ qib_write_kreg(ppd->dd, kr_ibcddrctrl,
+ ppd->cpspec->ibcddrctrl);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+ }
+ return ret;
+}
+
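+/*
+ * Update the receive header queue head register for a context, and
+ * optionally the eager buffer index head as well.
+ */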
+static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+ u32 updegr, u32 egrhd)
+{
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ if (updegr)
+ qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+}
+
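+/* Return nonzero if the receive header queue for this context is empty. */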
+static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
+{
+ u32 head, tail;
+
+ head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
+ if (rcd->rcvhdrtail_kvaddr)
+ tail = qib_get_rcvhdrtail(rcd);
+ else
+ tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
+ return head == tail;
+}
+
+/*
+ * Modify the RCVCTRL register in a chip-specific way. This
+ * is a function because bit positions and (future) register
+ * location are chip-specific, but the needed operations are
+ * generic. <op> is a bit-mask because we often want to
+ * do multiple modifications.
+ */
+static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op,
+ int ctxt)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 mask, val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+ if (op & QIB_RCVCTRL_TAILUPD_ENB)
+ dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT);
+ if (op & QIB_RCVCTRL_TAILUPD_DIS)
+ dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT);
+ if (op & QIB_RCVCTRL_PKEY_ENB)
+ dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT);
+ if (op & QIB_RCVCTRL_PKEY_DIS)
+ dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT);
+ if (ctxt < 0)
+ mask = (1ULL << dd->ctxtcnt) - 1;
+ else
+ mask = (1ULL << ctxt);
+ if (op & QIB_RCVCTRL_CTXT_ENB) {
+ /* always done for specific ctxt */
+ dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
+ if (!(dd->flags & QIB_NODMA_RTAIL))
+ dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT;
+ /* Write these registers before the context is enabled. */
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
+ dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
+ dd->rcd[ctxt]->rcvhdrq_phys);
+ dd->rcd[ctxt]->seq_cnt = 1;
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS)
+ dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
+ if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
+ dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT);
+ if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
+ dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT);
+ qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+ if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
+ /* arm rcv interrupt */
+ val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
+ dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ }
+ if (op & QIB_RCVCTRL_CTXT_ENB) {
+ /*
+ * Init the context registers also; if we were
+ * disabled, tail and head should both be zero
+ * already from the enable, but since we don't
+ * know, we have to do it explicitly.
+ */
+ val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
+ qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
+
+ val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
+ dd->rcd[ctxt]->head = val;
+ /* If kctxt, interrupt on next receive. */
+ if (ctxt < dd->first_user_ctxt)
+ val |= dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS) {
+ if (ctxt >= 0) {
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0);
+ } else {
+ unsigned i;
+
+ for (i = 0; i < dd->cfgctxts; i++) {
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
+ i, 0);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+}
+
+/*
+ * Modify the SENDCTRL register in a chip-specific way. This
+ * is a function because there may be multiple such registers with
+ * slightly different layouts. To start, we assume the
+ * "canonical" register layout of the first chips.
+ * The chip requires no back-to-back sendctrl writes, so write the
+ * scratch register after writing sendctrl.
+ */
+static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 tmp_dd_sendctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+
+ /* First the ones that are "sticky", saved in shadow */
+ if (op & QIB_SENDCTRL_CLEAR)
+ dd->sendctrl = 0;
+ if (op & QIB_SENDCTRL_SEND_DIS)
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable);
+ else if (op & QIB_SENDCTRL_SEND_ENB) {
+ dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable);
+ if (dd->flags & QIB_USE_SPCL_TRIG)
+ dd->sendctrl |= SYM_MASK(SendCtrl,
+ SSpecialTriggerEn);
+ }
+ if (op & QIB_SENDCTRL_AVAIL_DIS)
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+ else if (op & QIB_SENDCTRL_AVAIL_ENB)
+ dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
+
+ if (op & QIB_SENDCTRL_DISARM_ALL) {
+ u32 i, last;
+
+ tmp_dd_sendctrl = dd->sendctrl;
+ /*
+ * disarm any that are not yet launched, disabling sends
+ * and updates until done.
+ */
+ last = dd->piobcnt2k + dd->piobcnt4k;
+ tmp_dd_sendctrl &=
+ ~(SYM_MASK(SendCtrl, SPioEnable) |
+ SYM_MASK(SendCtrl, SendBufAvailUpd));
+ for (i = 0; i < last; i++) {
+ qib_write_kreg(dd, kr_sendctrl,
+ tmp_dd_sendctrl |
+ SYM_MASK(SendCtrl, Disarm) | i);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ }
+
+ tmp_dd_sendctrl = dd->sendctrl;
+
+ if (op & QIB_SENDCTRL_FLUSH)
+ tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
+ if (op & QIB_SENDCTRL_DISARM)
+ tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
+ ((op & QIB_7220_SendCtrl_DisarmPIOBuf_RMASK) <<
+ SYM_LSB(SendCtrl, DisarmPIOBuf));
+ if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
+ (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
+ tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+
+ qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ if (op & QIB_SENDCTRL_AVAIL_BLIP) {
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+
+ if (op & QIB_SENDCTRL_FLUSH) {
+ u32 v;
+ /*
+ * ensure writes have hit chip, then do a few
+ * more reads, to allow DMA of pioavail registers
+ * to occur, so in-memory copy is in sync with
+ * the chip. Not always safe to sleep.
+ */
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ qib_read_kreg32(dd, kr_scratch);
+ }
+}
+
+/**
+ * qib_portcntr_7220 - read a per-port counter
+ * @ppd: the qlogic_ib port
+ * @reg: the counter to snapshot
+ */
+static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg)
+{
+ u64 ret = 0ULL;
+ struct qib_devdata *dd = ppd->dd;
+ u16 creg;
+ /* 0xffff for unimplemented or synthesized counters */
+ static const u16 xlator[] = {
+ [QIBPORTCNTR_PKTSEND] = cr_pktsend,
+ [QIBPORTCNTR_WORDSEND] = cr_wordsend,
+ [QIBPORTCNTR_PSXMITDATA] = cr_psxmitdatacount,
+ [QIBPORTCNTR_PSXMITPKTS] = cr_psxmitpktscount,
+ [QIBPORTCNTR_PSXMITWAIT] = cr_psxmitwaitcount,
+ [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
+ [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
+ [QIBPORTCNTR_PSRCVDATA] = cr_psrcvdatacount,
+ [QIBPORTCNTR_PSRCVPKTS] = cr_psrcvpktscount,
+ [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
+ [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
+ [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
+ [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
+ [QIBPORTCNTR_RXLOCALPHYERR] = cr_rxotherlocalphyerr,
+ [QIBPORTCNTR_RXVLERR] = cr_rxvlerr,
+ [QIBPORTCNTR_ERRICRC] = cr_erricrc,
+ [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
+ [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
+ [QIBPORTCNTR_BADFORMAT] = cr_badformat,
+ [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
+ [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
+ [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
+ [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
+ [QIBPORTCNTR_EXCESSBUFOVFL] = cr_excessbufferovfl,
+ [QIBPORTCNTR_ERRLINK] = cr_errlink,
+ [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
+ [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
+ [QIBPORTCNTR_LLI] = cr_locallinkintegrityerr,
+ [QIBPORTCNTR_PSINTERVAL] = cr_psinterval,
+ [QIBPORTCNTR_PSSTART] = cr_psstart,
+ [QIBPORTCNTR_PSSTAT] = cr_psstat,
+ [QIBPORTCNTR_VL15PKTDROP] = cr_vl15droppedpkt,
+ [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
+ [QIBPORTCNTR_KHDROVFL] = 0xffff,
+ };
+
+ if (reg >= ARRAY_SIZE(xlator)) {
+ qib_devinfo(ppd->dd->pcidev,
+ "Unimplemented portcounter %u\n", reg);
+ goto done;
+ }
+ creg = xlator[reg];
+
+ if (reg == QIBPORTCNTR_KHDROVFL) {
+ int i;
+
+ /* sum over all kernel contexts */
+ for (i = 0; i < dd->first_user_ctxt; i++)
+ ret += read_7220_creg32(dd, cr_portovfl + i);
+ }
+ if (creg == 0xffff)
+ goto done;
+
+ /*
+ * only fast incrementing counters are 64bit; use 32 bit reads to
+ * avoid two independent reads when on opteron
+ */
+ if ((creg == cr_wordsend || creg == cr_wordrcv ||
+ creg == cr_pktsend || creg == cr_pktrcv))
+ ret = read_7220_creg(dd, creg);
+ else
+ ret = read_7220_creg32(dd, creg);
+ if (creg == cr_ibsymbolerr) {
+ if (dd->pport->cpspec->ibdeltainprog)
+ ret -= ret - ppd->cpspec->ibsymsnap;
+ ret -= dd->pport->cpspec->ibsymdelta;
+ } else if (creg == cr_iblinkerrrecov) {
+ if (dd->pport->cpspec->ibdeltainprog)
+ ret -= ret - ppd->cpspec->iblnkerrsnap;
+ ret -= dd->pport->cpspec->iblnkerrdelta;
+ }
+done:
+ return ret;
+}
+
+/*
+ * Device counter names (not port-specific), one line per stat,
+ * single string. Used by utilities like ipathstats to print the stats
+ * in a way which works for different versions of drivers, without changing
+ * the utility. Names need to be 12 chars or less (w/o newline), for proper
+ * display by utility.
+ * Non-error counters are first.
+ * Start of "error" conters is indicated by a leading "E " on the first
+ * "error" counter, and doesn't count in label length.
+ * The EgrOvfl list needs to be last so we truncate them at the configured
+ * context count for the device.
+ * cntr7220indices contains the corresponding register indices.
+ */
+static const char cntr7220names[] =
+ "Interrupts\n"
+ "HostBusStall\n"
+ "E RxTIDFull\n"
+ "RxTIDInvalid\n"
+ "Ctxt0EgrOvfl\n"
+ "Ctxt1EgrOvfl\n"
+ "Ctxt2EgrOvfl\n"
+ "Ctxt3EgrOvfl\n"
+ "Ctxt4EgrOvfl\n"
+ "Ctxt5EgrOvfl\n"
+ "Ctxt6EgrOvfl\n"
+ "Ctxt7EgrOvfl\n"
+ "Ctxt8EgrOvfl\n"
+ "Ctxt9EgrOvfl\n"
+ "Ctx10EgrOvfl\n"
+ "Ctx11EgrOvfl\n"
+ "Ctx12EgrOvfl\n"
+ "Ctx13EgrOvfl\n"
+ "Ctx14EgrOvfl\n"
+ "Ctx15EgrOvfl\n"
+ "Ctx16EgrOvfl\n";
+
+static const size_t cntr7220indices[] = {
+ cr_lbint,
+ cr_lbflowstall,
+ cr_errtidfull,
+ cr_errtidvalid,
+ cr_portovfl + 0,
+ cr_portovfl + 1,
+ cr_portovfl + 2,
+ cr_portovfl + 3,
+ cr_portovfl + 4,
+ cr_portovfl + 5,
+ cr_portovfl + 6,
+ cr_portovfl + 7,
+ cr_portovfl + 8,
+ cr_portovfl + 9,
+ cr_portovfl + 10,
+ cr_portovfl + 11,
+ cr_portovfl + 12,
+ cr_portovfl + 13,
+ cr_portovfl + 14,
+ cr_portovfl + 15,
+ cr_portovfl + 16,
+};
+
+/*
+ * same as cntr7220names and cntr7220indices, but for port-specific counters.
+ * portcntr7220indices is somewhat complicated by some registers needing
+ * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
+ */
+static const char portcntr7220names[] =
+ "TxPkt\n"
+ "TxFlowPkt\n"
+ "TxWords\n"
+ "RxPkt\n"
+ "RxFlowPkt\n"
+ "RxWords\n"
+ "TxFlowStall\n"
+ "TxDmaDesc\n" /* 7220 and 7322-only */
+ "E RxDlidFltr\n" /* 7220 and 7322-only */
+ "IBStatusChng\n"
+ "IBLinkDown\n"
+ "IBLnkRecov\n"
+ "IBRxLinkErr\n"
+ "IBSymbolErr\n"
+ "RxLLIErr\n"
+ "RxBadFormat\n"
+ "RxBadLen\n"
+ "RxBufOvrfl\n"
+ "RxEBP\n"
+ "RxFlowCtlErr\n"
+ "RxICRCerr\n"
+ "RxLPCRCerr\n"
+ "RxVCRCerr\n"
+ "RxInvalLen\n"
+ "RxInvalPKey\n"
+ "RxPktDropped\n"
+ "TxBadLength\n"
+ "TxDropped\n"
+ "TxInvalLen\n"
+ "TxUnderrun\n"
+ "TxUnsupVL\n"
+ "RxLclPhyErr\n" /* 7220 and 7322-only */
+ "RxVL15Drop\n" /* 7220 and 7322-only */
+ "RxVlErr\n" /* 7220 and 7322-only */
+ "XcessBufOvfl\n" /* 7220 and 7322-only */
+ ;
+
+#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
+static const size_t portcntr7220indices[] = {
+ QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
+ cr_pktsendflow,
+ QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
+ cr_pktrcvflowctrl,
+ QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
+ cr_txsdmadesc,
+ cr_rxdlidfltr,
+ cr_ibstatuschange,
+ QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
+ cr_rcvflowctrl_err,
+ QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
+ cr_invalidslen,
+ cr_senddropped,
+ cr_errslen,
+ cr_sendunderrun,
+ cr_txunsupvl,
+ QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
+};
+
+/* do all the setup to make the counter reads efficient later */
+static void init_7220_cntrnames(struct qib_devdata *dd)
+{
+ int i, j = 0;
+ char *s;
+
+ for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts;
+ i++) {
+ /* we always have at least one counter before the egrovfl */
+ if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
+ j = 1;
+ s = strchr(s + 1, '\n');
+ if (s && j)
+ j++;
+ }
+ dd->cspec->ncntrs = i;
+ if (!s)
+ /* full list; size is without terminating null */
+ dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1;
+ else
+ dd->cspec->cntrnamelen = 1 + s - cntr7220names;
+ dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->cspec->cntrs)
+ qib_dev_err(dd, "Failed allocation for counters\n");
+
+ for (i = 0, s = (char *)portcntr7220names; s; i++)
+ s = strchr(s + 1, '\n');
+ dd->cspec->nportcntrs = i - 1;
+ dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
+ dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->cspec->portcntrs)
+ qib_dev_err(dd, "Failed allocation for portcounters\n");
+}
+
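+/*
+ * Read the device-wide counters (or their names, if @namep is non-NULL).
+ * Returns the number of bytes available; 0 means the final read, or that
+ * the counter buffer was never allocated.
+ */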
+static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
+ u64 **cntrp)
+{
+ u32 ret;
+
+ if (!dd->cspec->cntrs) {
+ ret = 0;
+ goto done;
+ }
+
+ if (namep) {
+ *namep = (char *)cntr7220names;
+ ret = dd->cspec->cntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ } else {
+ u64 *cntr = dd->cspec->cntrs;
+ int i;
+
+ ret = dd->cspec->ncntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->ncntrs; i++)
+ *cntr++ = read_7220_creg32(dd, cntr7220indices[i]);
+ }
+done:
+ return ret;
+}
+
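+/*
+ * Read the per-port counters (or their names) for the given port.
+ * Entries flagged with _PORT_VIRT_FLAG are read via qib_portcntr_7220()
+ * so they get the appropriate adjustments.
+ */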
+static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
+ char **namep, u64 **cntrp)
+{
+ u32 ret;
+
+ if (!dd->cspec->portcntrs) {
+ ret = 0;
+ goto done;
+ }
+ if (namep) {
+ *namep = (char *)portcntr7220names;
+ ret = dd->cspec->portcntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ } else {
+ u64 *cntr = dd->cspec->portcntrs;
+ struct qib_pportdata *ppd = &dd->pport[port];
+ int i;
+
+ ret = dd->cspec->nportcntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->nportcntrs; i++) {
+ if (portcntr7220indices[i] & _PORT_VIRT_FLAG)
+ *cntr++ = qib_portcntr_7220(ppd,
+ portcntr7220indices[i] &
+ ~_PORT_VIRT_FLAG);
+ else
+ *cntr++ = read_7220_creg32(dd,
+ portcntr7220indices[i]);
+ }
+ }
+done:
+ return ret;
+}
+
+/**
+ * qib_get_7220_faststats - get word counters from chip before they overflow
+ * @opaque: contains a pointer to the qlogic_ib device qib_devdata
+ *
+ * This needs more work; in particular, a decision on whether we really
+ * need traffic_wds done the way it is.
+ * Called from add_timer.
+ */
+static void qib_get_7220_faststats(unsigned long opaque)
+{
+ struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_pportdata *ppd = dd->pport;
+ unsigned long flags;
+ u64 traffic_wds;
+
+ /*
+ * don't access the chip while running diags, or memory diags can
+ * fail
+ */
+ if (!(dd->flags & QIB_INITTED) || dd->diag_client)
+ /* but re-arm the timer, for the diags case; won't hurt other cases */
+ goto done;
+
+ /*
+ * We now try to maintain an activity timer, based on traffic
+ * exceeding a threshold, so we need to check the word-counts
+ * even if they are 64-bit.
+ */
+ traffic_wds = qib_portcntr_7220(ppd, cr_wordsend) +
+ qib_portcntr_7220(ppd, cr_wordrcv);
+ spin_lock_irqsave(&dd->eep_st_lock, flags);
+ traffic_wds -= dd->traffic_wds;
+ dd->traffic_wds += traffic_wds;
+ if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+ atomic_add(5, &dd->active_time); /* S/B #define */
+ spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+done:
+ mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+}
+
+/*
+ * If we are using MSI, try to fallback to INTx.
+ */
+static int qib_7220_intr_fallback(struct qib_devdata *dd)
+{
+ if (!dd->msi_lo)
+ return 0;
+
+ qib_devinfo(dd->pcidev, "MSI interrupt not detected,"
+ " trying INTx interrupts\n");
+ qib_7220_free_irq(dd);
+ qib_enable_intx(dd->pcidev);
+ /*
+ * Some newer kernels require free_irq before disable_msi,
+ * and irq can be changed during disable and INTx enable,
+ * so we therefore need to use the pcidev->irq value,
+ * not our saved MSI value.
+ */
+ dd->cspec->irq = dd->pcidev->irq;
+ qib_setup_7220_interrupt(dd);
+ return 1;
+}
+
+/*
+ * Reset the XGXS (between serdes and IBC). Slightly less intrusive
+ * than resetting the IBC or external link state, and useful in some
+ * cases to cause some retraining. To do this right, we reset IBC
+ * as well.
+ */
+static void qib_7220_xgxs_reset(struct qib_pportdata *ppd)
+{
+ u64 val, prev_val;
+ struct qib_devdata *dd = ppd->dd;
+
+ prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
+ val = prev_val | QLOGIC_IB_XGXS_RESET;
+ prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
+ qib_write_kreg(dd, kr_control,
+ dd->control & ~QLOGIC_IB_C_LINKENABLE);
+ qib_write_kreg(dd, kr_xgxs_cfg, val);
+ qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
+ qib_write_kreg(dd, kr_control, dd->control);
+}
+
+/*
+ * For this chip, we want to use the same buffer every time
+ * when we are trying to bring the link up (they are always VL15
+ * packets). At that link state the packet should always go out immediately
+ * (or at least be discarded at the tx interface if the link is down).
+ * If it doesn't, and the buffer isn't available, that means some other
+ * sender has gotten ahead of us, and is preventing our packet from going
+ * out. In that case, we flush all packets, and try again. If that still
+ * fails, we fail the request, and hope things work the next time around.
+ *
+ * We don't need very complicated heuristics on whether the packet had
+ * time to go out or not, since even at SDR 1X, it goes out in very short
+ * time periods, covered by the chip reads done here and as part of the
+ * flush.
+ */
+static u32 __iomem *get_7220_link_buf(struct qib_pportdata *ppd, u32 *bnum)
+{
+ u32 __iomem *buf;
+ u32 lbuf = ppd->dd->cspec->lastbuf_for_pio;
+ int do_cleanup;
+ unsigned long flags;
+
+ /*
+ * always blip to get avail list updated, since it's almost
+ * always needed, and is fairly cheap.
+ */
+ sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+ buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+ if (buf)
+ goto done;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ if (ppd->sdma_state.current_state == qib_sdma_state_s20_idle &&
+ ppd->sdma_state.current_state != qib_sdma_state_s00_hw_down) {
+ __qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
+ do_cleanup = 0;
+ } else {
+ do_cleanup = 1;
+ qib_7220_sdma_hw_clean_up(ppd);
+ }
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+ if (do_cleanup) {
+ qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+ buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+ }
+done:
+ return buf;
+}
+
+/*
+ * This code for non-IBTA-compliant IB speed negotiation is only known to
+ * work for the SDR to DDR transition, and only between an HCA and a switch
+ * with recent firmware. It is based on observed heuristics, rather than
+ * actual knowledge of the non-compliant speed negotiation.
+ * It has a number of hard-coded fields, since the hope is to rewrite this
+ * when a spec is available on how the negotiation is intended to work.
+ */
+static void autoneg_7220_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
+ u32 dcnt, u32 *data)
+{
+ int i;
+ u64 pbc;
+ u32 __iomem *piobuf;
+ u32 pnum;
+ struct qib_devdata *dd = ppd->dd;
+
+ i = 0;
+ pbc = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
+ pbc |= PBC_7220_VL15_SEND;
+ while (!(piobuf = get_7220_link_buf(ppd, &pnum))) {
+ if (i++ > 5)
+ return;
+ udelay(2);
+ }
+ sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum));
+ writeq(pbc, piobuf);
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, hdr, 7);
+ qib_pio_copy(piobuf + 9, data, dcnt);
+ if (dd->flags & QIB_USE_SPCL_TRIG) {
+ u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
+
+ qib_flush_wc();
+ __raw_writel(0xaebecede, piobuf + spcl_off);
+ }
+ qib_flush_wc();
+ qib_sendbuf_done(dd, pnum);
+}
+
+/*
+ * _start packet gets sent twice at start, _done gets sent twice at end
+ */
+static void autoneg_7220_send(struct qib_pportdata *ppd, int which)
+{
+ struct qib_devdata *dd = ppd->dd;
+ static u32 swapped;
+ u32 dw, i, hcnt, dcnt, *data;
+ static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
+ static u32 madpayload_start[0x40] = {
+ 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+ 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
+ };
+ static u32 madpayload_done[0x40] = {
+ 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+ 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x40000001, 0x1388, 0x15e, /* rest 0's */
+ };
+
+ dcnt = ARRAY_SIZE(madpayload_start);
+ hcnt = ARRAY_SIZE(hdr);
+ if (!swapped) {
+ /* for maintainability, do it at runtime */
+ for (i = 0; i < hcnt; i++) {
+ dw = (__force u32) cpu_to_be32(hdr[i]);
+ hdr[i] = dw;
+ }
+ for (i = 0; i < dcnt; i++) {
+ dw = (__force u32) cpu_to_be32(madpayload_start[i]);
+ madpayload_start[i] = dw;
+ dw = (__force u32) cpu_to_be32(madpayload_done[i]);
+ madpayload_done[i] = dw;
+ }
+ swapped = 1;
+ }
+
+ data = which ? madpayload_done : madpayload_start;
+
+ autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
+ qib_read_kreg64(dd, kr_scratch);
+ udelay(2);
+ autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
+ qib_read_kreg64(dd, kr_scratch);
+ udelay(2);
+}
+
+/*
+ * Do the absolute minimum to cause an IB speed change, and make it
+ * ready, but don't actually trigger the change. The caller will
+ * do that when ready (if link is in Polling training state, it will
+ * happen immediately, otherwise when link next goes down)
+ *
+ * This routine should only be used as part of the DDR autonegotiation
+ * code for devices that are not compliant with IB 1.2 (or code that
+ * fixes things up for same).
+ *
+ * When the link has gone down and autoneg is enabled, or autoneg has
+ * failed and we give up until next time, we set both speeds, and
+ * then we want IBTA enabled as well as "use max enabled speed".
+ */
+static void set_7220_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
+{
+ ppd->cpspec->ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
+ IBA7220_IBC_IBTA_1_2_MASK);
+
+ if (speed == (QIB_IB_SDR | QIB_IB_DDR))
+ ppd->cpspec->ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
+ IBA7220_IBC_IBTA_1_2_MASK;
+ else
+ ppd->cpspec->ibcddrctrl |= speed == QIB_IB_DDR ?
+ IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
+
+ qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+}
+
+/*
+ * This routine is only used when we are not talking to another
+ * IB 1.2-compliant device that we think can do DDR.
+ * (This includes all existing switch chips as of Oct 2007.)
+ * 1.2-compliant devices go directly to DDR prior to reaching INIT
+ */
+static void try_7220_autoneg(struct qib_pportdata *ppd)
+{
+ unsigned long flags;
+
+ /*
+ * Required for older non-IB1.2 DDR switches. Newer
+ * non-IB-compliant switches don't need it, but so far,
+ * aren't bothered by it either. "Magic constant"
+ */
+ qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07);
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ autoneg_7220_send(ppd, 0);
+ set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
+
+ toggle_7220_rclkrls(ppd->dd);
+ /* 2 msec is minimum length of a poll cycle */
+ schedule_delayed_work(&ppd->cpspec->autoneg_work,
+ msecs_to_jiffies(2));
+}
+
+/*
+ * Handle the empirically determined mechanism for auto-negotiation
+ * of DDR speed with switches.
+ */
+static void autoneg_7220_work(struct work_struct *work)
+{
+ struct qib_pportdata *ppd;
+ struct qib_devdata *dd;
+ u64 startms;
+ u32 i;
+ unsigned long flags;
+
+ ppd = &container_of(work, struct qib_chippport_specific,
+ autoneg_work.work)->pportdata;
+ dd = ppd->dd;
+
+ startms = jiffies_to_msecs(jiffies);
+
+ /*
+ * Busy wait for this first part, it should be at most a
+ * few hundred usec, since we scheduled ourselves for 2msec.
+ */
+ for (i = 0; i < 25; i++) {
+ if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState)
+ == IB_7220_LT_STATE_POLLQUIET) {
+ qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
+ break;
+ }
+ udelay(100);
+ }
+
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ goto done; /* we got there early or told to stop */
+
+ /* we expect this to timeout */
+ if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(90)))
+ goto done;
+
+ toggle_7220_rclkrls(dd);
+
+ /* we expect this to timeout */
+ if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(1700)))
+ goto done;
+
+ set_7220_ibspeed_fast(ppd, QIB_IB_SDR);
+ toggle_7220_rclkrls(dd);
+
+ /*
+ * Wait up to 250 msec for link to train and get to INIT at DDR;
+ * this should terminate early.
+ */
+ wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(250));
+done:
+ if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
+ if (dd->cspec->autoneg_tries == AUTONEG_TRIES) {
+ ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
+ dd->cspec->autoneg_tries = 0;
+ }
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
+ }
+}
+
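+/* Convert the chip's IBC link state field into the IBTA IB_PORT_* value. */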
+static u32 qib_7220_iblink_state(u64 ibcs)
+{
+ u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
+
+ switch (state) {
+ case IB_7220_L_STATE_INIT:
+ state = IB_PORT_INIT;
+ break;
+ case IB_7220_L_STATE_ARM:
+ state = IB_PORT_ARMED;
+ break;
+ case IB_7220_L_STATE_ACTIVE:
+ /* fall through */
+ case IB_7220_L_STATE_ACT_DEFER:
+ state = IB_PORT_ACTIVE;
+ break;
+ default: /* fall through */
+ case IB_7220_L_STATE_DOWN:
+ state = IB_PORT_DOWN;
+ break;
+ }
+ return state;
+}
+
+/* returns the IBTA port state, rather than the IBC link training state */
+static u8 qib_7220_phys_portstate(u64 ibcs)
+{
+ u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
+ return qib_7220_physportstate[state];
+}
+
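+/*
+ * Handle an IB link up/down transition: drive the DDR autoneg state
+ * machine, maintain the symbol/link-error counter deltas, quiesce SDMA
+ * on link down, and update the LEDs.  Returns 1 if no further IB status
+ * change processing should be done.
+ */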
+static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
+{
+ int ret = 0, symadj = 0;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+ if (!ibup) {
+ /*
+ * When the link goes down we don't want AEQ running, so it
+ * won't interfere with IBC training, etc., and we need
+ * to go back to the static SerDes preset values.
+ */
+ if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
+ QIBL_IB_AUTONEG_INPROG)))
+ set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ qib_sd7220_presets(dd);
+ qib_cancel_sends(ppd); /* initial disarm, etc. */
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ if (__qib_sdma_running(ppd))
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e70_go_idle);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ }
+ /* this might be better in qib_sd7220_presets() */
+ set_7220_relock_poll(dd, ibup);
+ } else {
+ if (qib_compat_ddr_negotiate &&
+ !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
+ QIBL_IB_AUTONEG_INPROG)) &&
+ ppd->link_speed_active == QIB_IB_SDR &&
+ (ppd->link_speed_enabled & (QIB_IB_DDR | QIB_IB_SDR)) ==
+ (QIB_IB_DDR | QIB_IB_SDR) &&
+ dd->cspec->autoneg_tries < AUTONEG_TRIES) {
+ /* we are SDR, and DDR auto-negotiation enabled */
+ ++dd->cspec->autoneg_tries;
+ if (!ppd->cpspec->ibdeltainprog) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7220_creg32(dd,
+ cr_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd,
+ cr_iblinkerrrecov);
+ }
+ try_7220_autoneg(ppd);
+ ret = 1; /* no other IB status change processing */
+ } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+ ppd->link_speed_active == QIB_IB_SDR) {
+ autoneg_7220_send(ppd, 1);
+ set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
+ udelay(2);
+ toggle_7220_rclkrls(dd);
+ ret = 1; /* no other IB status change processing */
+ } else {
+ if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+ (ppd->link_speed_active & QIB_IB_DDR)) {
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
+ QIBL_IB_AUTONEG_FAILED);
+ spin_unlock_irqrestore(&ppd->lflags_lock,
+ flags);
+ dd->cspec->autoneg_tries = 0;
+ /* re-enable SDR, for next link down */
+ set_7220_ibspeed_fast(ppd,
+ ppd->link_speed_enabled);
+ wake_up(&ppd->cpspec->autoneg_wait);
+ symadj = 1;
+ } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
+ /*
+ * Clear autoneg failure flag, and do setup
+ * so we'll try next time link goes down and
+ * back to INIT (possibly connected to a
+ * different device).
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&ppd->lflags_lock,
+ flags);
+ ppd->cpspec->ibcddrctrl |=
+ IBA7220_IBC_IBTA_1_2_MASK;
+ qib_write_kreg(dd, kr_ncmodectrl, 0);
+ symadj = 1;
+ }
+ }
+
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ symadj = 1;
+
+ if (!ret) {
+ ppd->delay_mult = rate_to_delay
+ [(ibcs >> IBA7220_LINKSPEED_SHIFT) & 1]
+ [(ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1];
+
+ set_7220_relock_poll(dd, ibup);
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ /*
+ * Unlike 7322, the 7220 needs this, due to lack of
+ * interrupt in some cases when we have sdma active
+ * when the link goes down.
+ */
+ if (ppd->sdma_state.current_state !=
+ qib_sdma_state_s20_idle)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e00_go_hw_down);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ }
+ }
+
+ if (symadj) {
+ if (ppd->cpspec->ibdeltainprog) {
+ ppd->cpspec->ibdeltainprog = 0;
+ ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd,
+ cr_ibsymbolerr) - ppd->cpspec->ibsymsnap;
+ ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd,
+ cr_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
+ }
+ } else if (!ibup && qib_compat_ddr_negotiate &&
+ !ppd->cpspec->ibdeltainprog &&
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd,
+ cr_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd,
+ cr_iblinkerrrecov);
+ }
+
+ if (!ret)
+ qib_setup_7220_setextled(ppd, ibup);
+ return ret;
+}
+
+/*
+ * Does read/modify/write to appropriate registers to
+ * set output and direction bits selected by mask.
+ * These are in their canonical positions (e.g. lsb of
+ * dir will end up in D48 of extctrl on existing chips).
+ * Returns contents of GP Inputs.
+ */
+static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
+{
+ u64 read_val, new_out;
+ unsigned long flags;
+
+ if (mask) {
+ /* some bits being written, lock access to GPIO */
+ dir &= mask;
+ out &= mask;
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
+ dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
+ new_out = (dd->cspec->gpio_out & ~mask) | out;
+
+ qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+ qib_write_kreg(dd, kr_gpio_out, new_out);
+ dd->cspec->gpio_out = new_out;
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+ }
+ /*
+ * It is unlikely that a read at this time would get valid
+ * data on a pin whose direction line was set in the same
+ * call to this function. We include the read here because
+ * that allows us to potentially combine a change on one pin with
+ * a read on another, and because the old code did something like
+ * this.
+ */
+ read_val = qib_read_kreg64(dd, kr_extstatus);
+ return SYM_FIELD(read_val, EXTStatus, GPIOIn);
+}
+
+/*
+ * Read fundamental info we need to use the chip. These are
+ * the registers that describe chip capabilities, and are
+ * saved in shadow registers.
+ */
+static void get_7220_chip_params(struct qib_devdata *dd)
+{
+ u64 val;
+ u32 piobufs;
+ int mtu;
+
+ dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
+
+ dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
+ dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
+ dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
+ dd->palign = qib_read_kreg32(dd, kr_palign);
+ dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
+ dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
+
+ val = qib_read_kreg64(dd, kr_sendpiosize);
+ dd->piosize2k = val & ~0U;
+ dd->piosize4k = val >> 32;
+
+ mtu = ib_mtu_enum_to_int(qib_ibmtu);
+ if (mtu == -1)
+ mtu = QIB_DEFAULT_MTU;
+ dd->pport->ibmtu = (u32)mtu;
+
+ val = qib_read_kreg64(dd, kr_sendpiobufcnt);
+ dd->piobcnt2k = val & ~0U;
+ dd->piobcnt4k = val >> 32;
+ /* these may be adjusted in init_chip_wc_pat() */
+ dd->pio2kbase = (u32 __iomem *)
+ ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
+ if (dd->piobcnt4k) {
+ dd->pio4kbase = (u32 __iomem *)
+ ((char __iomem *) dd->kregbase +
+ (dd->piobufbase >> 32));
+ /*
+ * 4K buffers take 2 pages; we use roundup just to be
+ * paranoid; we calculate it once here, rather than on
+ * every buf allocation
+ */
+ dd->align4k = ALIGN(dd->piosize4k, dd->palign);
+ }
+
+ piobufs = dd->piobcnt4k + dd->piobcnt2k;
+
+ dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
+ (sizeof(u64) * BITS_PER_BYTE / 2);
+}
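+
+/*
+ * Worked example for the pioavregs computation above (illustrative
+ * numbers only): each 64-bit pioavail register tracks 32 send buffers
+ * (sizeof(u64) * BITS_PER_BYTE / 2 == 32, i.e. two bits per buffer),
+ * so with, say, piobcnt2k == 160 and piobcnt4k == 32, piobufs == 192
+ * and pioavregs == ALIGN(192, 32) / 32 == 6.
+ */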
+
+/*
+ * The chip base addresses in cspec and cpspec have to be set
+ * after possible init_chip_wc_pat(), rather than in
+ * get_7220_chip_params(), so this is split out as a separate function
+ */
+static void set_7220_baseaddrs(struct qib_devdata *dd)
+{
+ u32 cregbase;
+ /* init after possible re-map in init_chip_wc_pat() */
+ cregbase = qib_read_kreg32(dd, kr_counterregbase);
+ dd->cspec->cregbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase + cregbase);
+
+ dd->egrtidbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase + dd->rcvegrbase);
+}
+
+
+#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl, SendIntBufAvail) | \
+ SYM_MASK(SendCtrl, SPioEnable) | \
+ SYM_MASK(SendCtrl, SSpecialTriggerEn) | \
+ SYM_MASK(SendCtrl, SendBufAvailUpd) | \
+ SYM_MASK(SendCtrl, AvailUpdThld) | \
+ SYM_MASK(SendCtrl, SDmaEnable) | \
+ SYM_MASK(SendCtrl, SDmaIntEnable) | \
+ SYM_MASK(SendCtrl, SDmaHalt) | \
+ SYM_MASK(SendCtrl, SDmaSingleDescriptor))
+
+static int sendctrl_hook(struct qib_devdata *dd,
+ const struct diag_observer *op,
+ u32 offs, u64 *data, u64 mask, int only_32)
+{
+ unsigned long flags;
+ unsigned idx = offs / sizeof(u64);
+ u64 local_data, all_bits;
+
+ if (idx != kr_sendctrl) {
+ qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n",
+ offs, only_32 ? "32" : "64");
+ return 0;
+ }
+
+ all_bits = ~0ULL;
+ if (only_32)
+ all_bits >>= 32;
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if ((mask & all_bits) != all_bits) {
+ /*
+ * At least some mask bits are zero, so we need
+ * to read. The judgement call is whether from
+ * reg or shadow. First-cut: read reg, and complain
+ * if any bits which should be shadowed are different
+ * from their shadowed value.
+ */
+ if (only_32)
+ local_data = (u64)qib_read_kreg32(dd, idx);
+ else
+ local_data = qib_read_kreg64(dd, idx);
+ qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n",
+ (u32)local_data, (u32)dd->sendctrl);
+ if ((local_data & SENDCTRL_SHADOWED) !=
+ (dd->sendctrl & SENDCTRL_SHADOWED))
+ qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n",
+ (u32)local_data, (u32) dd->sendctrl);
+ *data = (local_data & ~mask) | (*data & mask);
+ }
+ if (mask) {
+ /*
+ * At least some mask bits are one, so we need
+ * to write, but only shadow some bits.
+ */
+ u64 sval, tval; /* Shadowed, transient */
+
+ /*
+ * New shadow val is bits we don't want to touch,
+ * ORed with bits we do, that are intended for shadow.
+ */
+ sval = (dd->sendctrl & ~mask);
+ sval |= *data & SENDCTRL_SHADOWED & mask;
+ dd->sendctrl = sval;
+ tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
+ qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n",
+ (u32)tval, (u32)sval);
+ qib_write_kreg(dd, kr_sendctrl, tval);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ }
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+
+ return only_32 ? 4 : 8;
+}
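+
+/*
+ * Example of the shadow/transient split above (hypothetical values):
+ * if the diag write's mask covers both SPioEnable (shadowed) and a
+ * non-shadowed bit, SPioEnable lands in both sval (the new dd->sendctrl
+ * shadow) and tval, while the non-shadowed bit is written to the chip
+ * via tval only and never enters the shadow.
+ */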
+
+static const struct diag_observer sendctrl_observer = {
+ sendctrl_hook, kr_sendctrl * sizeof(u64),
+ kr_sendctrl * sizeof(u64)
+};
+
+/*
+ * Write the final few registers that depend on some of the
+ * init setup. Done late in init, just before bringing up
+ * the serdes.
+ */
+static int qib_late_7220_initreg(struct qib_devdata *dd)
+{
+ int ret = 0;
+ u64 val;
+
+ qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
+ qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
+ qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
+ qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
+ val = qib_read_kreg64(dd, kr_sendpioavailaddr);
+ if (val != dd->pioavailregs_phys) {
+ qib_dev_err(dd, "Catastrophic software error, "
+ "SendPIOAvailAddr written as %lx, "
+ "read back as %llx\n",
+ (unsigned long) dd->pioavailregs_phys,
+ (unsigned long long) val);
+ ret = -EINVAL;
+ }
+ qib_register_observer(dd, &sendctrl_observer);
+ return ret;
+}
+
+static int qib_init_7220_variables(struct qib_devdata *dd)
+{
+ struct qib_chippport_specific *cpspec;
+ struct qib_pportdata *ppd;
+ int ret = 0;
+ u32 sbufs, updthresh;
+
+ cpspec = (struct qib_chippport_specific *)(dd + 1);
+ ppd = &cpspec->pportdata;
+ dd->pport = ppd;
+ dd->num_pports = 1;
+
+ dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
+ ppd->cpspec = cpspec;
+
+ spin_lock_init(&dd->cspec->sdepb_lock);
+ spin_lock_init(&dd->cspec->rcvmod_lock);
+ spin_lock_init(&dd->cspec->gpio_lock);
+
+ /* we haven't yet set QIB_PRESENT, so use read directly */
+ dd->revision = readq(&dd->kregbase[kr_revision]);
+
+ if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
+ qib_dev_err(dd, "Revision register read failure, "
+ "giving up initialization\n");
+ ret = -ENODEV;
+ goto bail;
+ }
+ dd->flags |= QIB_PRESENT; /* now register routines work */
+
+ dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
+ ChipRevMajor);
+ dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
+ ChipRevMinor);
+
+ get_7220_chip_params(dd);
+ qib_7220_boardname(dd);
+
+ /*
+ * GPIO bits for TWSI data and clock,
+ * used for serial EEPROM.
+ */
+ dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
+ dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
+ dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
+
+ dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
+ QIB_NODMA_RTAIL | QIB_HAS_THRESH_UPDATE;
+ dd->flags |= qib_special_trigger ?
+ QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
+
+ /*
+ * EEPROM error log 0 is TXE Parity errors, 1 is RXE Parity,
+ * 2 is miscellaneous errors, and 3 is reserved for future use.
+ */
+ dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
+
+ dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
+
+ dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
+
+ init_waitqueue_head(&cpspec->autoneg_wait);
+ INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);
+
+ qib_init_pportdata(ppd, dd, 0, 1);
+ ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
+ ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR;
+
+ ppd->link_width_enabled = ppd->link_width_supported;
+ ppd->link_speed_enabled = ppd->link_speed_supported;
+ /*
+ * Set the initial values to reasonable defaults; they will be set
+ * for real when the link comes up.
+ */
+ ppd->link_width_active = IB_WIDTH_4X;
+ ppd->link_speed_active = QIB_IB_SDR;
+ ppd->delay_mult = rate_to_delay[0][1];
+ ppd->vls_supported = IB_VL_VL0;
+ ppd->vls_operational = ppd->vls_supported;
+
+ if (!qib_mini_init)
+ qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);
+
+ init_timer(&ppd->cpspec->chase_timer);
+ ppd->cpspec->chase_timer.function = reenable_7220_chase;
+ ppd->cpspec->chase_timer.data = (unsigned long)ppd;
+
+ qib_num_cfg_vls = 1; /* if any 7220's, only one VL */
+
+ dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
+ dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+ dd->rhf_offset =
+ dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
+
+ /* we always allocate at least 2048 bytes for eager buffers */
+ ret = ib_mtu_enum_to_int(qib_ibmtu);
+ dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
+
+ qib_7220_tidtemplate(dd);
+
+ /*
+ * We can request a receive interrupt for 1 or
+ * more packets from current offset. For now, we set this
+ * up for a single packet.
+ */
+ dd->rhdrhead_intr_off = 1ULL << 32;
+
+ /* setup the stats timer; the add_timer is done at end of init */
+ init_timer(&dd->stats_timer);
+ dd->stats_timer.function = qib_get_7220_faststats;
+ dd->stats_timer.data = (unsigned long) dd;
+ dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;
+
+ /*
+ * Control[4] has been added to change the arbitration within
+ * the SDMA engine between data fetches and descriptor fetches.
+ * qib_sdma_fetch_arb==0 gives data fetches priority.
+ */
+ if (qib_sdma_fetch_arb)
+ dd->control |= 1 << 4;
+
+ dd->ureg_align = 0x10000; /* 64KB alignment */
+
+ dd->piosize2kmax_dwords = (dd->piosize2k >> 2)-1;
+ qib_7220_config_ctxts(dd);
+ qib_set_ctxtcnt(dd); /* needed for PAT setup */
+
+ if (qib_wc_pat) {
+ ret = init_chip_wc_pat(dd, 0);
+ if (ret)
+ goto bail;
+ }
+ set_7220_baseaddrs(dd); /* set chip access pointers now */
+
+ ret = 0;
+ if (qib_mini_init)
+ goto bail;
+
+ ret = qib_create_ctxts(dd);
+ init_7220_cntrnames(dd);
+
+ /* Use all of the 4KB buffers for kernel SDMA (zero if !SDMA).
+ * Reserve the greater of the update threshold amount or 3 buffers
+ * for other kernel use, such as sending SMI, MAD, and ACKs,
+ * unless we aren't enabling SDMA, in which case we want to use
+ * all the 4k bufs for the kernel.
+ * If this were less than the update threshold, we could wait
+ * a long time for an update. Coded this way because we
+ * sometimes change the update threshold for various reasons,
+ * and we want this to remain robust.
+ */
+ updthresh = 8U; /* update threshold */
+ if (dd->flags & QIB_HAS_SEND_DMA) {
+ dd->cspec->sdmabufcnt = dd->piobcnt4k;
+ sbufs = updthresh > 3 ? updthresh : 3;
+ } else {
+ dd->cspec->sdmabufcnt = 0;
+ sbufs = dd->piobcnt4k;
+ }
+
+ dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
+ dd->cspec->sdmabufcnt;
+ dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
+ dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
+ dd->pbufsctxt = dd->lastctxt_piobuf /
+ (dd->cfgctxts - dd->first_user_ctxt);
+
+ /*
+ * If we are at 16 user contexts, we will have only 7 sbufs
+ * per context, so drop the update threshold to match. We
+ * want to update before we actually run out, at low pbufs/ctxt,
+ * so give ourselves some margin.
+ */
+ if ((dd->pbufsctxt - 2) < updthresh)
+ updthresh = dd->pbufsctxt - 2;
+
+ dd->cspec->updthresh_dflt = updthresh;
+ dd->cspec->updthresh = updthresh;
+
+ /* before full enable, no interrupts, no locking needed */
+ dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
+ << SYM_LSB(SendCtrl, AvailUpdThld);
+
+ dd->psxmitwait_supported = 1;
+ dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE;
+bail:
+ return ret;
+}
+
+static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
+ u32 *pbufnum)
+{
+ u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
+ struct qib_devdata *dd = ppd->dd;
+ u32 __iomem *buf;
+
+ if (((pbc >> 32) & PBC_7220_VL15_SEND_CTRL) &&
+ !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
+ buf = get_7220_link_buf(ppd, pbufnum);
+ else {
+ if ((plen + 1) > dd->piosize2kmax_dwords)
+ first = dd->piobcnt2k;
+ else
+ first = 0;
+ /* try 4k if all 2k busy, so same last for both sizes */
+ last = dd->cspec->lastbuf_for_pio;
+ buf = qib_getsendbuf_range(dd, pbufnum, first, last);
+ }
+ return buf;
+}
+
+/* these 2 "counters" are really control registers, and are always RW */
+static void qib_set_cntr_7220_sample(struct qib_pportdata *ppd, u32 intv,
+ u32 start)
+{
+ write_7220_creg(ppd->dd, cr_psinterval, intv);
+ write_7220_creg(ppd->dd, cr_psstart, start);
+}
+
+/*
+ * NOTE: no real attempt is made to generalize the SDMA stuff.
+ * At some point "soon" we will have a new, more generalized
+ * sdma interface, and then we'll clean this up.
+ */
+
+/* Must be called with sdma_lock held, or before init finished */
+static void qib_sdma_update_7220_tail(struct qib_pportdata *ppd, u16 tail)
+{
+ /* Commit writes to memory and advance the tail on the chip */
+ wmb();
+ ppd->sdma_descq_tail = tail;
+ qib_write_kreg(ppd->dd, kr_senddmatail, tail);
+}
+
+static void qib_sdma_set_7220_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
+{
+}
+
+static struct sdma_set_state_action sdma_7220_action_table[] = {
+ [qib_sdma_state_s00_hw_down] = {
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .go_s99_running_tofalse = 1,
+ },
+ [qib_sdma_state_s10_hw_start_up_wait] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ },
+ [qib_sdma_state_s20_idle] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ },
+ [qib_sdma_state_s30_sw_clean_up_wait] = {
+ .op_enable = 0,
+ .op_intenable = 1,
+ .op_halt = 0,
+ },
+ [qib_sdma_state_s40_hw_clean_up_wait] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ },
+ [qib_sdma_state_s50_hw_halt_wait] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ },
+ [qib_sdma_state_s99_running] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 0,
+ .go_s99_running_totrue = 1,
+ },
+};
+
+static void qib_7220_sdma_init_early(struct qib_pportdata *ppd)
+{
+ ppd->sdma_state.set_state_action = sdma_7220_action_table;
+}
+
+static int init_sdma_7220_regs(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ unsigned i, n;
+ u64 senddmabufmask[3] = { 0 };
+
+ /* Set SendDmaBase */
+ qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys);
+ qib_sdma_7220_setlengen(ppd);
+ qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
+ /* Set SendDmaHeadAddr */
+ qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys);
+
+ /*
+ * Reserve all the former "kernel" piobufs, using high number range
+ * so we get as many 4K buffers as possible
+ */
+ n = dd->piobcnt2k + dd->piobcnt4k;
+ i = n - dd->cspec->sdmabufcnt;
+
+ for (; i < n; ++i) {
+ unsigned word = i / 64;
+ unsigned bit = i & 63;
+
+ BUG_ON(word >= 3);
+ senddmabufmask[word] |= 1ULL << bit;
+ }
+ qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]);
+ qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]);
+ qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]);
+
+ ppd->sdma_state.first_sendbuf = i;
+ ppd->sdma_state.last_sendbuf = n;
+
+ return 0;
+}
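+
+/*
+ * Illustrative numbers for the mask setup above: with
+ * piobcnt2k + piobcnt4k == 192 and sdmabufcnt == 32, buffers 160..191
+ * are reserved for SDMA, so senddmabufmask[2] ends up as
+ * 0xffffffff00000000 while masks 0 and 1 stay zero.
+ */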
+
+/* sdma_lock must be held */
+static u16 qib_sdma_7220_gethead(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int sane;
+ int use_dmahead;
+ u16 swhead;
+ u16 swtail;
+ u16 cnt;
+ u16 hwhead;
+
+ use_dmahead = __qib_sdma_running(ppd) &&
+ (dd->flags & QIB_HAS_SDMA_TIMEOUT);
+retry:
+ hwhead = use_dmahead ?
+ (u16)le64_to_cpu(*ppd->sdma_head_dma) :
+ (u16)qib_read_kreg32(dd, kr_senddmahead);
+
+ swhead = ppd->sdma_descq_head;
+ swtail = ppd->sdma_descq_tail;
+ cnt = ppd->sdma_descq_cnt;
+
+ if (swhead < swtail) {
+ /* not wrapped */
+ sane = (hwhead >= swhead) & (hwhead <= swtail);
+ } else if (swhead > swtail) {
+ /* wrapped around */
+ sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
+ (hwhead <= swtail);
+ } else {
+ /* empty */
+ sane = (hwhead == swhead);
+ }
+
+ if (unlikely(!sane)) {
+ if (use_dmahead) {
+ /* try one more time, directly from the register */
+ use_dmahead = 0;
+ goto retry;
+ }
+ /* assume no progress */
+ hwhead = swhead;
+ }
+
+ return hwhead;
+}
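+
+/*
+ * Example of the sanity window above (hypothetical ring state): with
+ * sdma_descq_cnt == 256, swhead == 200 and swtail == 50 (i.e. the
+ * queue has wrapped), hwhead is accepted if 200 <= hwhead < 256 or
+ * hwhead <= 50; anything else retries once directly from the register
+ * and then falls back to "no progress" (hwhead = swhead).
+ */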
+
+static int qib_sdma_7220_busy(struct qib_pportdata *ppd)
+{
+ u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus);
+
+ return (hwstatus & SYM_MASK(SendDmaStatus, ScoreBoardDrainInProg)) ||
+ (hwstatus & SYM_MASK(SendDmaStatus, AbortInProg)) ||
+ (hwstatus & SYM_MASK(SendDmaStatus, InternalSDmaEnable)) ||
+ !(hwstatus & SYM_MASK(SendDmaStatus, ScbEmpty));
+}
+
+/*
+ * Compute the amount of delay before sending the next packet if the
+ * port's send rate differs from the static rate set for the QP.
+ * Since the delay affects this packet but the amount of the delay is
+ * based on the length of the previous packet, use the last delay computed
+ * and save the delay count for this packet to be used next time
+ * we get here.
+ */
+static u32 qib_7220_setpbc_control(struct qib_pportdata *ppd, u32 plen,
+ u8 srate, u8 vl)
+{
+ u8 snd_mult = ppd->delay_mult;
+ u8 rcv_mult = ib_rate_to_delay[srate];
+ u32 ret = ppd->cpspec->last_delay_mult;
+
+ ppd->cpspec->last_delay_mult = (rcv_mult > snd_mult) ?
+ (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
+
+ /* Indicate VL15, if necessary */
+ if (vl == 15)
+ ret |= PBC_7220_VL15_SEND_CTRL;
+ return ret;
+}
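+
+/*
+ * Worked example, with made-up multipliers: if the QP's static rate
+ * maps to rcv_mult == 16 and the port's delay_mult is 2, a packet of
+ * plen == 100 dwords stores (100 * (16 - 2) + 1) >> 1 == 700 as
+ * last_delay_mult, which is then returned (and applied) on the next
+ * call for this port.
+ */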
+
+static void qib_7220_initvl15_bufs(struct qib_devdata *dd)
+{
+}
+
+static void qib_7220_init_ctxt(struct qib_ctxtdata *rcd)
+{
+ if (!rcd->ctxt) {
+ rcd->rcvegrcnt = IBA7220_KRCVEGRCNT;
+ rcd->rcvegr_tid_base = 0;
+ } else {
+ rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
+ rcd->rcvegr_tid_base = IBA7220_KRCVEGRCNT +
+ (rcd->ctxt - 1) * rcd->rcvegrcnt;
+ }
+}
+
+static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start,
+ u32 len, u32 which, struct qib_ctxtdata *rcd)
+{
+ int i;
+ unsigned long flags;
+
+ switch (which) {
+ case TXCHK_CHG_TYPE_KERN:
+ /* see if we need to raise avail update threshold */
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ for (i = dd->first_user_ctxt;
+ dd->cspec->updthresh != dd->cspec->updthresh_dflt
+ && i < dd->cfgctxts; i++)
+ if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
+ ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
+ < dd->cspec->updthresh_dflt)
+ break;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ if (i == dd->cfgctxts) {
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ dd->cspec->updthresh = dd->cspec->updthresh_dflt;
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+ dd->sendctrl |= (dd->cspec->updthresh &
+ SYM_RMASK(SendCtrl, AvailUpdThld)) <<
+ SYM_LSB(SendCtrl, AvailUpdThld);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ }
+ break;
+ case TXCHK_CHG_TYPE_USER:
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
+ / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
+ dd->cspec->updthresh = (rcd->piocnt /
+ rcd->subctxt_cnt) - 1;
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+ dd->sendctrl |= (dd->cspec->updthresh &
+ SYM_RMASK(SendCtrl, AvailUpdThld))
+ << SYM_LSB(SendCtrl, AvailUpdThld);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ } else
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ break;
+ }
+}
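+
+/*
+ * Example for the user-context branch above (hypothetical sizes): a
+ * context with piocnt == 32 shared by 4 subcontexts lowers updthresh
+ * to 32 / 4 - 1 == 7 if that is below the current threshold, and the
+ * kernel branch restores updthresh_dflt once no such context remains.
+ */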
+
+static void writescratch(struct qib_devdata *dd, u32 val)
+{
+ qib_write_kreg(dd, kr_scratch, val);
+}
+
+#define VALID_TS_RD_REG_MASK 0xBF
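+/* 0xBF has only bit 6 clear, so register 6 is the one read back as bogus 0 */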
+/**
+ * qib_7220_tempsense_rd - read register of temp sensor via TWSI
+ * @dd: the qlogic_ib device
+ * @regnum: register to read from
+ *
+ * returns reg contents (0..255) or < 0 for error
+ */
+static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum)
+{
+ int ret;
+ u8 rdata;
+
+ if (regnum > 7) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* return a bogus value for (the one) register we do not have */
+ if (!((1 << regnum) & VALID_TS_RD_REG_MASK)) {
+ ret = 0;
+ goto bail;
+ }
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (ret)
+ goto bail;
+
+ ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1);
+ if (!ret)
+ ret = rdata;
+
+ mutex_unlock(&dd->eep_lock);
+
+ /*
+ * There are three possibilities here:
+ * ret is actual value (0..255)
+ * ret is -ENXIO or -EINVAL from twsi code or this file
+ * ret is -EINTR from mutex_lock_interruptible.
+ */
+bail:
+ return ret;
+}
+
+/* Dummy function, as 7220 boards never disable EEPROM Write */
+static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
+{
+ return 1;
+}
+
+/**
+ * qib_init_iba7220_funcs - set up the chip-specific function pointers
+ * @pdev: the pci_dev for qlogic_ib device
+ * @ent: pci_device_id struct for this dev
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ */
+struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct qib_devdata *dd;
+ int ret;
+ u32 boardid, minwidth;
+
+ dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) +
+ sizeof(struct qib_chippport_specific));
+ if (IS_ERR(dd))
+ goto bail;
+
+ dd->f_bringup_serdes = qib_7220_bringup_serdes;
+ dd->f_cleanup = qib_setup_7220_cleanup;
+ dd->f_clear_tids = qib_7220_clear_tids;
+ dd->f_free_irq = qib_7220_free_irq;
+ dd->f_get_base_info = qib_7220_get_base_info;
+ dd->f_get_msgheader = qib_7220_get_msgheader;
+ dd->f_getsendbuf = qib_7220_getsendbuf;
+ dd->f_gpio_mod = gpio_7220_mod;
+ dd->f_eeprom_wen = qib_7220_eeprom_wen;
+ dd->f_hdrqempty = qib_7220_hdrqempty;
+ dd->f_ib_updown = qib_7220_ib_updown;
+ dd->f_init_ctxt = qib_7220_init_ctxt;
+ dd->f_initvl15_bufs = qib_7220_initvl15_bufs;
+ dd->f_intr_fallback = qib_7220_intr_fallback;
+ dd->f_late_initreg = qib_late_7220_initreg;
+ dd->f_setpbc_control = qib_7220_setpbc_control;
+ dd->f_portcntr = qib_portcntr_7220;
+ dd->f_put_tid = qib_7220_put_tid;
+ dd->f_quiet_serdes = qib_7220_quiet_serdes;
+ dd->f_rcvctrl = rcvctrl_7220_mod;
+ dd->f_read_cntrs = qib_read_7220cntrs;
+ dd->f_read_portcntrs = qib_read_7220portcntrs;
+ dd->f_reset = qib_setup_7220_reset;
+ dd->f_init_sdma_regs = init_sdma_7220_regs;
+ dd->f_sdma_busy = qib_sdma_7220_busy;
+ dd->f_sdma_gethead = qib_sdma_7220_gethead;
+ dd->f_sdma_sendctrl = qib_7220_sdma_sendctrl;
+ dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt;
+ dd->f_sdma_update_tail = qib_sdma_update_7220_tail;
+ dd->f_sdma_hw_clean_up = qib_7220_sdma_hw_clean_up;
+ dd->f_sdma_hw_start_up = qib_7220_sdma_hw_start_up;
+ dd->f_sdma_init_early = qib_7220_sdma_init_early;
+ dd->f_sendctrl = sendctrl_7220_mod;
+ dd->f_set_armlaunch = qib_set_7220_armlaunch;
+ dd->f_set_cntr_sample = qib_set_cntr_7220_sample;
+ dd->f_iblink_state = qib_7220_iblink_state;
+ dd->f_ibphys_portstate = qib_7220_phys_portstate;
+ dd->f_get_ib_cfg = qib_7220_get_ib_cfg;
+ dd->f_set_ib_cfg = qib_7220_set_ib_cfg;
+ dd->f_set_ib_loopback = qib_7220_set_loopback;
+ dd->f_set_intr_state = qib_7220_set_intr_state;
+ dd->f_setextled = qib_setup_7220_setextled;
+ dd->f_txchk_change = qib_7220_txchk_change;
+ dd->f_update_usrhead = qib_update_7220_usrhead;
+ dd->f_wantpiobuf_intr = qib_wantpiobuf_7220_intr;
+ dd->f_xgxs_reset = qib_7220_xgxs_reset;
+ dd->f_writescratch = writescratch;
+ dd->f_tempsense_rd = qib_7220_tempsense_rd;
+ /*
+ * Do remaining pcie setup and save pcie values in dd.
+ * Any error printing is already done by the init code.
+ * On return, we have the chip mapped, but chip registers
+ * are not set up until start of qib_init_7220_variables.
+ */
+ ret = qib_pcie_ddinit(dd, pdev, ent);
+ if (ret < 0)
+ goto bail_free;
+
+ /* initialize chip-specific variables */
+ ret = qib_init_7220_variables(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ if (qib_mini_init)
+ goto bail;
+
+ boardid = SYM_FIELD(dd->revision, Revision,
+ BoardID);
+ switch (boardid) {
+ case 0:
+ case 2:
+ case 10:
+ case 12:
+ minwidth = 16; /* x16 capable boards */
+ break;
+ default:
+ minwidth = 8; /* x8 capable boards */
+ break;
+ }
+ if (qib_pcie_params(dd, minwidth, NULL, NULL))
+ qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
+ "continuing anyway\n");
+
+ /* save IRQ for possible later use */
+ dd->cspec->irq = pdev->irq;
+
+ if (qib_read_kreg64(dd, kr_hwerrstatus) &
+ QLOGIC_IB_HWE_SERDESPLLFAILED)
+ qib_write_kreg(dd, kr_hwerrclear,
+ QLOGIC_IB_HWE_SERDESPLLFAILED);
+
+ /* setup interrupt handler (interrupt type handled above) */
+ qib_setup_7220_interrupt(dd);
+ qib_7220_init_hwerrors(dd);
+
+ /* clear diagctrl register, in case diags were running and crashed */
+ qib_write_kreg(dd, kr_hwdiagctrl, 0);
+
+ goto bail;
+
+bail_cleanup:
+ qib_pcie_ddcleanup(dd);
+bail_free:
+ qib_free_devdata(dd);
+ dd = ERR_PTR(ret);
+bail:
+ return dd;
+}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
new file mode 100644
index 000000000000..503992d9c5ce
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -0,0 +1,7645 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file contains all of the code that is specific to the
+ * InfiniPath 7322 chip
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_smi.h>
+
+#include "qib.h"
+#include "qib_7322_regs.h"
+#include "qib_qsfp.h"
+
+#include "qib_mad.h"
+
+static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
+static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
+static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
+static irqreturn_t qib_7322intr(int irq, void *data);
+static irqreturn_t qib_7322bufavail(int irq, void *data);
+static irqreturn_t sdma_intr(int irq, void *data);
+static irqreturn_t sdma_idle_intr(int irq, void *data);
+static irqreturn_t sdma_progress_intr(int irq, void *data);
+static irqreturn_t sdma_cleanup_intr(int irq, void *data);
+static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
+ struct qib_ctxtdata *rcd);
+static u8 qib_7322_phys_portstate(u64);
+static u32 qib_7322_iblink_state(u64);
+static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
+ u16 linitcmd);
+static void force_h1(struct qib_pportdata *);
+static void adj_tx_serdes(struct qib_pportdata *);
+static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
+static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
+
+static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
+static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
+
+#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
+
+/* LE2 serdes values for different cases */
+#define LE2_DEFAULT 5
+#define LE2_5m 4
+#define LE2_QME 0
+
+/* Below is special-purpose, so only really works for the IB SerDes blocks. */
+#define IBSD(hw_pidx) (hw_pidx + 2)
+
+/* these are variables for documentation and experimentation purposes */
+static const unsigned rcv_int_timeout = 375;
+static const unsigned rcv_int_count = 16;
+static const unsigned sdma_idle_cnt = 64;
+
+/* Time to stop altering Rx Equalization parameters, after link up. */
+#define RXEQ_DISABLE_MSECS 2500
+
+/*
+ * Number of VLs we are configured to use (to allow for more
+ * credits per vl, etc.)
+ */
+ushort qib_num_cfg_vls = 2;
+module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
+MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
+
+static ushort qib_chase = 1;
+module_param_named(chase, qib_chase, ushort, S_IRUGO);
+MODULE_PARM_DESC(chase, "Enable state chase handling");
+
+static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
+module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
+MODULE_PARM_DESC(long_attenuation, \
+ "attenuation cutoff (dB) for long copper cable setup");
+
+static ushort qib_singleport;
+module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
+MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
+
+#define MAX_ATTEN_LEN 64 /* plenty for any real system */
+/* for read back, default index is ~5m copper cable */
+static char txselect_list[MAX_ATTEN_LEN] = "10";
+static struct kparam_string kp_txselect = {
+ .string = txselect_list,
+ .maxlen = MAX_ATTEN_LEN
+};
+static int setup_txselect(const char *, struct kernel_param *);
+module_param_call(txselect, setup_txselect, param_get_string,
+ &kp_txselect, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(txselect, \
+ "Tx serdes indices (for no QSFP or invalid QSFP data)");
+
+#define BOARD_QME7342 5
+#define BOARD_QMH7342 6
+#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
+ BOARD_QMH7342)
+#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
+ BOARD_QME7342)
+
+#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))
+
+#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
+
+#define MASK_ACROSS(lsb, msb) \
+ (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
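+/* e.g. MASK_ACROSS(0, 17) == 0x3ffff: the 18 contiguous bits used below */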
+
+#define SYM_RMASK(regname, fldname) ((u64) \
+ QIB_7322_##regname##_##fldname##_RMASK)
+
+#define SYM_MASK(regname, fldname) ((u64) \
+ QIB_7322_##regname##_##fldname##_RMASK << \
+ QIB_7322_##regname##_##fldname##_LSB)
+
+#define SYM_FIELD(value, regname, fldname) ((u64) \
+ (((value) >> SYM_LSB(regname, fldname)) & \
+ SYM_RMASK(regname, fldname)))
+
+/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
+#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
+ (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
+
+#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
+#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
+#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
+#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
+#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
+/* Below is needed because most, but not all, fields of IntMask have that full suffix */
+#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
+
+
+#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
+
+/*
+ * The size bits give us 2^N, in KB units. 0 marks the entry as invalid,
+ * and 7 is reserved. We currently use only 2KB and 4KB.
+ */
+#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
+#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
+#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
+#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
+
+#define SendIBSLIDAssignMask \
+ QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
+#define SendIBSLMCMask \
+ QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
+
+#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
+#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
+#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
+#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
+#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
+#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
+
+#define _QIB_GPIO_SDA_NUM 1
+#define _QIB_GPIO_SCL_NUM 0
+#define QIB_EEPROM_WEN_NUM 14
+#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
+
+/* HW counter clock is at 4nsec */
+#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
+
+/* full speed IB port 1 only */
+#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
+#define PORT_SPD_CAP_SHIFT 3
+
+/* full speed featuremask, both ports */
+#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
+
+/*
+ * This file contains almost all the chip-specific register information and
+ * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
+ */
+
+/* Use defines to tie machine-generated names to lower-case names */
+#define kr_contextcnt KREG_IDX(ContextCnt)
+#define kr_control KREG_IDX(Control)
+#define kr_counterregbase KREG_IDX(CntrRegBase)
+#define kr_errclear KREG_IDX(ErrClear)
+#define kr_errmask KREG_IDX(ErrMask)
+#define kr_errstatus KREG_IDX(ErrStatus)
+#define kr_extctrl KREG_IDX(EXTCtrl)
+#define kr_extstatus KREG_IDX(EXTStatus)
+#define kr_gpio_clear KREG_IDX(GPIOClear)
+#define kr_gpio_mask KREG_IDX(GPIOMask)
+#define kr_gpio_out KREG_IDX(GPIOOut)
+#define kr_gpio_status KREG_IDX(GPIOStatus)
+#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
+#define kr_debugportval KREG_IDX(DebugPortValueReg)
+#define kr_fmask KREG_IDX(feature_mask)
+#define kr_act_fmask KREG_IDX(active_feature_mask)
+#define kr_hwerrclear KREG_IDX(HwErrClear)
+#define kr_hwerrmask KREG_IDX(HwErrMask)
+#define kr_hwerrstatus KREG_IDX(HwErrStatus)
+#define kr_intclear KREG_IDX(IntClear)
+#define kr_intmask KREG_IDX(IntMask)
+#define kr_intredirect KREG_IDX(IntRedirect0)
+#define kr_intstatus KREG_IDX(IntStatus)
+#define kr_pagealign KREG_IDX(PageAlign)
+#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
+#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
+#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
+#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
+#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
+#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
+#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
+#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
+#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
+#define kr_revision KREG_IDX(Revision)
+#define kr_scratch KREG_IDX(Scratch)
+#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
+#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
+#define kr_sendctrl KREG_IDX(SendCtrl)
+#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
+#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
+#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
+#define kr_sendpiobufbase KREG_IDX(SendBufBase)
+#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
+#define kr_sendpiosize KREG_IDX(SendBufSize)
+#define kr_sendregbase KREG_IDX(SendRegBase)
+#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
+#define kr_userregbase KREG_IDX(UserRegBase)
+#define kr_intgranted KREG_IDX(Int_Granted)
+#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
+#define kr_intblocked KREG_IDX(IntBlocked)
+#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
+
+/*
+ * per-port kernel registers. Access only with qib_read_kreg_port()
+ * or qib_write_kreg_port()
+ */
+#define krp_errclear KREG_IBPORT_IDX(ErrClear)
+#define krp_errmask KREG_IBPORT_IDX(ErrMask)
+#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
+#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
+#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
+#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
+#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
+#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
+#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
+#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
+#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
+#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
+#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
+#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
+#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
+#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
+#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
+#define krp_psstart KREG_IBPORT_IDX(PSStart)
+#define krp_psstat KREG_IBPORT_IDX(PSStat)
+#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
+#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
+#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
+#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
+#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
+#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
+#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
+#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
+#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
+#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
+#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
+#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
+#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
+#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
+#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
+#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
+#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
+#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
+#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
+#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
+#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
+#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
+#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
+#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
+#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
+#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
+#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
+#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
+#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
+#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
+#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
+
+/*
+ * Per-context kernel registers. Access only with qib_read_kreg_ctxt()
+ * or qib_write_kreg_ctxt()
+ */
+#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
+#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
+
+/*
+ * TID Flow table, per context. Reduces the
+ * number of hdrq updates to one per flow (or on errors).
+ * Contexts 0 and 1 share the same memory, but have distinct
+ * addresses. Since, for now, we never use expected sends
+ * on kernel contexts, we don't worry about that (we initialize
+ * those entries for ctxt 0/1 on driver load twice, for example).
+ */
+#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
+#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
+
+/* these are the error bits in the tid flows, and are W1C */
+#define TIDFLOW_ERRBITS ( \
+ (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
+ SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
+ (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
+ SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
+
+/* Most (not all) Counters are per-IBport.
+ * Requires LBIntCnt is at offset 0 in the group
+ */
+#define CREG_IDX(regname) \
+((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
+
+#define crp_badformat CREG_IDX(RxVersionErrCnt)
+#define crp_err_rlen CREG_IDX(RxLenErrCnt)
+#define crp_erricrc CREG_IDX(RxICRCErrCnt)
+#define crp_errlink CREG_IDX(RxLinkMalformCnt)
+#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
+#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
+#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
+#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
+#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
+#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
+#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
+#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
+#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
+#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
+#define crp_pktrcv CREG_IDX(RxDataPktCnt)
+#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
+#define crp_pktsend CREG_IDX(TxDataPktCnt)
+#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
+#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
+#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
+#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
+#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
+#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
+#define crp_rcvebp CREG_IDX(RxEBPCnt)
+#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
+#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
+#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
+#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
+#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
+#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
+#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
+#define crp_sendstall CREG_IDX(TxFlowStallCnt)
+#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
+#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
+#define crp_txlenerr CREG_IDX(TxLenErrCnt)
+#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
+#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
+#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
+#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
+#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
+#define crp_wordrcv CREG_IDX(RxDwordCnt)
+#define crp_wordsend CREG_IDX(TxDwordCnt)
+#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
+
+/* these are the (few) counters that are not port-specific */
+#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
+ QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
+#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
+#define cr_lbint CREG_DEVIDX(LBIntCnt)
+#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
+#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
+#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
+#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
+#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
+
+/* no chip register for # of IB ports supported, so define */
+#define NUM_IB_PORTS 2
+
+/* 1 VL15 buffer per hardware IB port, no register for this, so define */
+#define NUM_VL15_BUFS NUM_IB_PORTS
+
+/*
+ * Contexts 0 and 1 are special, and there is no chip register that
+ * defines this value, so we have to define it here.
+ * These are all allocated to either 0 or 1 for a single-port
+ * hardware configuration; otherwise each gets half.
+ */
+#define KCTXT0_EGRCNT 2048
+
+/* values for vl and port fields in PBC, 7322-specific */
+#define PBC_PORT_SEL_LSB 26
+#define PBC_PORT_SEL_RMASK 1
+#define PBC_VL_NUM_LSB 27
+#define PBC_VL_NUM_RMASK 7
+#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
+#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
+
+static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
+ [IB_RATE_2_5_GBPS] = 16,
+ [IB_RATE_5_GBPS] = 8,
+ [IB_RATE_10_GBPS] = 4,
+ [IB_RATE_20_GBPS] = 2,
+ [IB_RATE_30_GBPS] = 2,
+ [IB_RATE_40_GBPS] = 1
+};
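+
+/*
+ * The table above is roughly 40 Gb/s (QDR 4X) divided by the static
+ * rate, rounded up: 2.5 Gb/s -> 16, 10 Gb/s -> 4, 40 Gb/s -> 1, so the
+ * inter-packet delay grows as the QP's static rate shrinks.
+ */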
+
+#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
+#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
+
+/* link training states, from IBC */
+#define IB_7322_LT_STATE_DISABLED 0x00
+#define IB_7322_LT_STATE_LINKUP 0x01
+#define IB_7322_LT_STATE_POLLACTIVE 0x02
+#define IB_7322_LT_STATE_POLLQUIET 0x03
+#define IB_7322_LT_STATE_SLEEPDELAY 0x04
+#define IB_7322_LT_STATE_SLEEPQUIET 0x05
+#define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
+#define IB_7322_LT_STATE_CFGRCVFCFG 0x09
+#define IB_7322_LT_STATE_CFGWAITRMT 0x0a
+#define IB_7322_LT_STATE_CFGIDLE 0x0b
+#define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
+#define IB_7322_LT_STATE_TXREVLANES 0x0d
+#define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
+#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
+#define IB_7322_LT_STATE_CFGENH 0x10
+#define IB_7322_LT_STATE_CFGTEST 0x11
+
+/* link state machine states from IBC */
+#define IB_7322_L_STATE_DOWN 0x0
+#define IB_7322_L_STATE_INIT 0x1
+#define IB_7322_L_STATE_ARM 0x2
+#define IB_7322_L_STATE_ACTIVE 0x3
+#define IB_7322_L_STATE_ACT_DEFER 0x4
+
+static const u8 qib_7322_physportstate[0x20] = {
+ [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
+ [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
+ [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
+ [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
+ [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7322_LT_STATE_CFGRCVFCFG] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7322_LT_STATE_CFGWAITRMT] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
+ [IB_7322_LT_STATE_RECOVERRETRAIN] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_7322_LT_STATE_RECOVERWAITRMT] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_7322_LT_STATE_RECOVERIDLE] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
+ [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
+ [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
+};
+
+struct qib_chip_specific {
+ u64 __iomem *cregbase;
+ u64 *cntrs;
+ spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
+ spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
+ u64 main_int_mask; /* clear bits which have dedicated handlers */
+ u64 int_enable_mask; /* for per port interrupts in single port mode */
+ u64 errormask;
+ u64 hwerrmask;
+ u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
+ u64 gpio_mask; /* shadow the gpio mask register */
+ u64 extctrl; /* shadow the gpio output enable, etc... */
+ u32 ncntrs;
+ u32 nportcntrs;
+ u32 cntrnamelen;
+ u32 portcntrnamelen;
+ u32 numctxts;
+ u32 rcvegrcnt;
+ u32 updthresh; /* current AvailUpdThld */
+ u32 updthresh_dflt; /* default AvailUpdThld */
+ u32 r1;
+ int irq;
+ u32 num_msix_entries;
+ u32 sdmabufcnt;
+ u32 lastbuf_for_pio;
+ u32 stay_in_freeze;
+ u32 recovery_ports_initted;
+ struct msix_entry *msix_entries;
+ void **msix_arg;
+ unsigned long *sendchkenable;
+ unsigned long *sendgrhchk;
+ unsigned long *sendibchk;
+ u32 rcvavail_timeout[18];
+ char emsgbuf[128]; /* for device error interrupt msg buffer */
+};
+
+/* Table of Tx Emphasis entries in "human readable" form. */
+struct txdds_ent {
+ u8 amp;
+ u8 pre;
+ u8 main;
+ u8 post;
+};
+
+struct vendor_txdds_ent {
+ u8 oui[QSFP_VOUI_LEN];
+ u8 *partnum;
+ struct txdds_ent sdr;
+ struct txdds_ent ddr;
+ struct txdds_ent qdr;
+};
+
+static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
+
+#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
+#define TXDDS_EXTRA_SZ 11 /* number of extra tx settings entries */
+#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
+
+#define H1_FORCE_VAL 8
+#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
+#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */
+
+/* The static and dynamic registers are paired, and the pairs indexed by spd */
+#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
+ + ((spd) * 2))
+
+#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
+#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
+#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
+#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
+#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
+
+struct qib_chippport_specific {
+ u64 __iomem *kpregbase;
+ u64 __iomem *cpregbase;
+ u64 *portcntrs;
+ struct qib_pportdata *ppd;
+ wait_queue_head_t autoneg_wait;
+ struct delayed_work autoneg_work;
+ struct delayed_work ipg_work;
+ struct timer_list chase_timer;
+ /*
+ * these 5 fields are used to establish deltas for IB symbol
+ * errors and link recovery errors. They can be reported on
+ * some chips during link negotiation prior to INIT, and with
+ * DDR when faking DDR negotiations with non-IBTA switches.
+ * The chip counters are adjusted at driver unload if there is
+ * a non-zero delta.
+ */
+ u64 ibdeltainprog;
+ u64 ibsymdelta;
+ u64 ibsymsnap;
+ u64 iblnkerrdelta;
+ u64 iblnkerrsnap;
+ u64 iblnkdownsnap;
+ u64 iblnkdowndelta;
+ u64 ibmalfdelta;
+ u64 ibmalfsnap;
+ u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
+ u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
+ u64 qdr_dfe_time;
+ u64 chase_end;
+ u32 autoneg_tries;
+ u32 recovery_init;
+ u32 qdr_dfe_on;
+ u32 qdr_reforce;
+ /*
+ * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
+ * entry zero is unused, to simplify indexing
+ */
+ u8 h1_val;
+ u8 no_eep; /* txselect table index to use if no qsfp info */
+ u8 ipg_tries;
+ u8 ibmalfusesnap;
+ struct qib_qsfp_data qsfp_data;
+ char epmsgbuf[192]; /* for port error interrupt msg buffer */
+};
+
+static struct {
+ const char *name;
+ irq_handler_t handler;
+ int lsb;
+ int port; /* 0 if not port-specific, else port # */
+} irq_table[] = {
+ { QIB_DRV_NAME, qib_7322intr, -1, 0 },
+ { QIB_DRV_NAME " (buf avail)", qib_7322bufavail,
+ SYM_LSB(IntStatus, SendBufAvail), 0 },
+ { QIB_DRV_NAME " (sdma 0)", sdma_intr,
+ SYM_LSB(IntStatus, SDmaInt_0), 1 },
+ { QIB_DRV_NAME " (sdma 1)", sdma_intr,
+ SYM_LSB(IntStatus, SDmaInt_1), 2 },
+ { QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr,
+ SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
+ { QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr,
+ SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
+ { QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr,
+ SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
+ { QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr,
+ SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
+ { QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr,
+ SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
+ { QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr,
+ SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
+};
+
+/* ibcctrl bits */
+#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
+/* cycle through TS1/TS2 till OK */
+#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
+/* wait for TS1, then go on */
+#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
+#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
+
+#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
+#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
+#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
+
+#define BLOB_7322_IBCHG 0x101
+
+static inline void qib_write_kreg(const struct qib_devdata *dd,
+ const u32 regno, u64 value);
+static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
+static void write_7322_initregs(struct qib_devdata *);
+static void write_7322_init_portregs(struct qib_pportdata *);
+static void setup_7322_link_recovery(struct qib_pportdata *, u32);
+static void check_7322_rxe_status(struct qib_pportdata *);
+static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
+
+/**
+ * qib_read_ureg32 - read 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @ctxt: context number
+ *
+ * Return the contents of a register that is virtualized to be per context.
+ * Returns 0 on error (not distinguishable from valid contents at
+ * runtime; we may add a separate error variable at some point).
+ */
+static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
+ enum qib_ureg regno, int ctxt)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readl(regno + (u64 __iomem *)(
+ (dd->ureg_align * ctxt) + (dd->userbase ?
+ (char __iomem *)dd->userbase :
+ (char __iomem *)dd->kregbase + dd->uregbase)));
+}
+
+/**
+ * qib_read_ureg - read virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @ctxt: context number
+ *
+ * Return the contents of a register that is virtualized to be per context.
+ * Returns 0 on error (not distinguishable from valid contents at
+ * runtime; we may add a separate error variable at some point).
+ */
+static inline u64 qib_read_ureg(const struct qib_devdata *dd,
+ enum qib_ureg regno, int ctxt)
+{
+
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readq(regno + (u64 __iomem *)(
+ (dd->ureg_align * ctxt) + (dd->userbase ?
+ (char __iomem *)dd->userbase :
+ (char __iomem *)dd->kregbase + dd->uregbase)));
+}
+
+/**
+ * qib_write_ureg - write virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @value: value
+ * @ctxt: context
+ *
+ * Write the contents of a register that is virtualized to be per context.
+ */
+static inline void qib_write_ureg(const struct qib_devdata *dd,
+ enum qib_ureg regno, u64 value, int ctxt)
+{
+ u64 __iomem *ubase;
+ if (dd->userbase)
+ ubase = (u64 __iomem *)
+ ((char __iomem *) dd->userbase +
+ dd->ureg_align * ctxt);
+ else
+ ubase = (u64 __iomem *)
+ (dd->uregbase +
+ (char __iomem *) dd->kregbase +
+ dd->ureg_align * ctxt);
+
+ if (dd->kregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &ubase[regno]);
+}
+
+static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
+ const u32 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+ return readl((u32 __iomem *) &dd->kregbase[regno]);
+}
+
+static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
+ const u32 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+ return readq(&dd->kregbase[regno]);
+}
+
+static inline void qib_write_kreg(const struct qib_devdata *dd,
+ const u32 regno, u64 value)
+{
+ if (dd->kregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &dd->kregbase[regno]);
+}
+
+/*
+ * Not many sanity checks for the port-specific kernel register routines,
+ * since they are only used when it's known to be safe.
+ */
+static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
+ const u16 regno)
+{
+ if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
+ return 0ULL;
+ return readq(&ppd->cpspec->kpregbase[regno]);
+}
+
+static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
+ const u16 regno, u64 value)
+{
+ if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
+ (ppd->dd->flags & QIB_PRESENT))
+ writeq(value, &ppd->cpspec->kpregbase[regno]);
+}
+
+/**
+ * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
+ * @dd: the qlogic_ib device
+ * @regno: the register number to write
+ * @ctxt: the context containing the register
+ * @value: the value to write
+ */
+static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
+ const u16 regno, unsigned ctxt,
+ u64 value)
+{
+ qib_write_kreg(dd, regno + ctxt, value);
+}
+
+static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readq(&dd->cspec->cregbase[regno]);
+}
+
+static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readl(&dd->cspec->cregbase[regno]);
+}
+
+static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
+ u16 regno, u64 value)
+{
+ if (ppd->cpspec && ppd->cpspec->cpregbase &&
+ (ppd->dd->flags & QIB_PRESENT))
+ writeq(value, &ppd->cpspec->cpregbase[regno]);
+}
+
+static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
+ u16 regno)
+{
+ if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
+ !(ppd->dd->flags & QIB_PRESENT))
+ return 0;
+ return readq(&ppd->cpspec->cpregbase[regno]);
+}
+
+static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
+ u16 regno)
+{
+ if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
+ !(ppd->dd->flags & QIB_PRESENT))
+ return 0;
+ return readl(&ppd->cpspec->cpregbase[regno]);
+}
+
+/* bits in Control register */
+#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
+#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
+
+/* bits in general interrupt regs */
+#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
+#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
+#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
+#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
+#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
+#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
+#define QIB_I_C_ERROR INT_MASK(Err)
+
+#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
+#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
+#define QIB_I_GPIO INT_MASK(AssertGPIO)
+#define QIB_I_P_SDMAINT(pidx) \
+ (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
+ INT_MASK_P(SDmaProgress, pidx) | \
+ INT_MASK_PM(SDmaCleanupDone, pidx))
+
+/* Interrupt bits that are "per port" */
+#define QIB_I_P_BITSEXTANT(pidx) \
+ (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
+ INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
+ INT_MASK_P(SDmaProgress, pidx) | \
+ INT_MASK_PM(SDmaCleanupDone, pidx))
+
+/* Interrupt bits that are common to a device */
+/* currently unused: QIB_I_SPIOSENT */
+#define QIB_I_C_BITSEXTANT \
+ (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
+ QIB_I_SPIOSENT | \
+ QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
+
+#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
+ QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
+
+/*
+ * Error bits that are "per port".
+ */
+#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
+#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
+#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
+#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
+#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
+#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
+#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
+#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
+#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
+#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
+#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
+#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
+#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
+#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
+#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
+#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
+#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
+#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
+#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
+#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
+#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
+#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
+#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
+#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
+#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
+#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
+#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
+#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
+
+#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
+#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
+#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
+#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
+#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
+#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
+#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
+#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
+#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
+#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
+#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
+
+/* Error bits that are common to a device */
+#define QIB_E_RESET ERR_MASK(ResetNegated)
+#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
+#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
+
+
+/*
+ * Per-chip (rather than per-port) errors. Most either do nothing but
+ * trigger a print (because they self-recover, or always occur in tandem
+ * with other errors that handle the issue), or indicate errors with no
+ * recovery; either way, we want to know that they happened.
+ */
+#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
+#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
+#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
+#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
+#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
+#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
+#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
+#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
+
+/* SDMA chip errors (not per port)
+ * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
+ * the SDMAHALT error immediately, so we just print the dup error via the
+ * E_AUTO mechanism. This is true of most of the per-port fatal errors
+ * as well, but since this is port-independent, by definition, it's
+ * handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per
+ * packet send errors, and so are handled in the same manner as other
+ * per-packet errors.
+ */
+#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
+#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
+#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
+
+/*
+ * Below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
+ * it is used to print "common" packet errors.
+ */
+#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
+ QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
+ QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
+ QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
+ QIB_E_P_REBP)
+
+/* Error bits that are packet-related (Receive, per-port) */
+#define QIB_E_P_RPKTERRS (\
+ QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
+ QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
+ QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
+ QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
+ QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
+ QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
+
+/*
+ * Error bits that are Send-related (per port)
+ * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
+ * All of these potentially need to have a buffer disarmed
+ */
+#define QIB_E_P_SPKTERRS (\
+ QIB_E_P_SUNEXP_PKTNUM |\
+ QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
+ QIB_E_P_SMAXPKTLEN |\
+ QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
+ QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
+ QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
+
+#define QIB_E_SPKTERRS ( \
+ QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
+ ERR_MASK_N(SendUnsupportedVLErr) | \
+ QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
+
+#define QIB_E_P_SDMAERRS ( \
+ QIB_E_P_SDMAHALT | \
+ QIB_E_P_SDMADESCADDRMISALIGN | \
+ QIB_E_P_SDMAUNEXPDATA | \
+ QIB_E_P_SDMAMISSINGDW | \
+ QIB_E_P_SDMADWEN | \
+ QIB_E_P_SDMARPYTAG | \
+ QIB_E_P_SDMA1STDESC | \
+ QIB_E_P_SDMABASE | \
+ QIB_E_P_SDMATAILOUTOFBOUND | \
+ QIB_E_P_SDMAOUTOFBOUND | \
+ QIB_E_P_SDMAGENMISMATCH)
+
+/*
+ * This sets some bits more than once, but makes it more obvious which
+ * bits are not handled under other categories, and the repeat definition
+ * is not a problem.
+ */
+#define QIB_E_P_BITSEXTANT ( \
+ QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
+ QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
+ QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
+ QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
+ )
+
+/*
+ * These are errors that can occur when the link changes state while
+ * a packet is being sent or received. This doesn't cover things like
+ * EBP or VCRC, which can result from the sender seeing the link change
+ * state, so that we receive a "known bad" packet.
+ * All of these are "per port", hence the renaming:
+ */
+#define QIB_E_P_LINK_PKTERRS (\
+ QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
+ QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
+ QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
+ QIB_E_P_RUNEXPCHAR)
+
+/*
+ * This sets some bits more than once, but makes it more obvious which
+ * bits are not handled under other categories (such as QIB_E_SPKTERRS),
+ * and the repeat definition is not a problem.
+ */
+#define QIB_E_C_BITSEXTANT (\
+ QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
+ QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
+ QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
+
+/* Likewise, neuter E_SPKT_ERRS_IGNORE (defined as 0: no send packet errors are ignored) */
+#define E_SPKT_ERRS_IGNORE 0
+
+#define QIB_EXTS_MEMBIST_DISABLED \
+ SYM_MASK(EXTStatus, MemBISTDisabled)
+#define QIB_EXTS_MEMBIST_ENDTEST \
+ SYM_MASK(EXTStatus, MemBISTEndTest)
+
+#define QIB_E_SPIOARMLAUNCH \
+ ERR_MASK(SendArmLaunchErr)
+
+#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
+#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
+
+/*
+ * IBTA_1_2 is set when multiple speeds are enabled (normal),
+ * and also if forced QDR (only QDR enabled). It's enabled for the
+ * forced QDR case so that scrambling will be enabled by the TS3
+ * exchange, when supported by both sides of the link.
+ */
+#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
+#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
+#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
+#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
+#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
+#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
+ SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
+#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
+
+#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
+#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
+
+#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
+#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
+#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
+
+#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
+#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
+#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
+ SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
+#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
+ SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
+#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
+
+#define IBA7322_REDIRECT_VEC_PER_REG 12
+
+#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
+#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
+#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
+#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
+#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
+
+#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
+
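+/*
+ * HWE_AUTO and HWE_AUTO_P build { mask, message } table entries for
+ * hardware-error bits; the message text is the field name itself.
+ */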
+#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
+ .msg = #fldname }
+#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
+ fldname##Mask##_##port), .msg = #fldname }
+static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
+ HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
+ HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
+ HWE_AUTO(PCIESerdesPClkNotDetect),
+ HWE_AUTO(PowerOnBISTFailed),
+ HWE_AUTO(TempsenseTholdReached),
+ HWE_AUTO(MemoryErr),
+ HWE_AUTO(PCIeBusParityErr),
+ HWE_AUTO(PcieCplTimeout),
+ HWE_AUTO(PciePoisonedTLP),
+ HWE_AUTO_P(SDmaMemReadErr, 1),
+ HWE_AUTO_P(SDmaMemReadErr, 0),
+ HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
+ HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
+ HWE_AUTO_P(statusValidNoEop, 1),
+ HWE_AUTO_P(statusValidNoEop, 0),
+ HWE_AUTO(LATriggered),
+ { .mask = 0 }
+};
+
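+/* Same pattern for the device (ErrMask) and per-port (ErrMask_0) error registers */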
+#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
+ .msg = #fldname }
+#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
+ .msg = #fldname }
+static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
+ E_AUTO(ResetNegated),
+ E_AUTO(HardwareErr),
+ E_AUTO(InvalidAddrErr),
+ E_AUTO(SDmaVL15Err),
+ E_AUTO(SBufVL15MisUseErr),
+ E_AUTO(InvalidEEPCmd),
+ E_AUTO(RcvContextShareErr),
+ E_AUTO(SendVLMismatchErr),
+ E_AUTO(SendArmLaunchErr),
+ E_AUTO(SendSpecialTriggerErr),
+ E_AUTO(SDmaWrongPortErr),
+ E_AUTO(SDmaBufMaskDuplicateErr),
+ E_AUTO(RcvHdrFullErr),
+ E_AUTO(RcvEgrFullErr),
+ { .mask = 0 }
+};
+
+static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
+ E_P_AUTO(IBStatusChanged),
+ E_P_AUTO(SHeadersErr),
+ E_P_AUTO(VL15BufMisuseErr),
+ /*
+ * SDmaHaltErr is not really an error, so make that clearer
+ */
+ {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"},
+ E_P_AUTO(SDmaDescAddrMisalignErr),
+ E_P_AUTO(SDmaUnexpDataErr),
+ E_P_AUTO(SDmaMissingDwErr),
+ E_P_AUTO(SDmaDwEnErr),
+ E_P_AUTO(SDmaRpyTagErr),
+ E_P_AUTO(SDma1stDescErr),
+ E_P_AUTO(SDmaBaseErr),
+ E_P_AUTO(SDmaTailOutOfBoundErr),
+ E_P_AUTO(SDmaOutOfBoundErr),
+ E_P_AUTO(SDmaGenMismatchErr),
+ E_P_AUTO(SendBufMisuseErr),
+ E_P_AUTO(SendUnsupportedVLErr),
+ E_P_AUTO(SendUnexpectedPktNumErr),
+ E_P_AUTO(SendDroppedDataPktErr),
+ E_P_AUTO(SendDroppedSmpPktErr),
+ E_P_AUTO(SendPktLenErr),
+ E_P_AUTO(SendUnderRunErr),
+ E_P_AUTO(SendMaxPktLenErr),
+ E_P_AUTO(SendMinPktLenErr),
+ E_P_AUTO(RcvIBLostLinkErr),
+ E_P_AUTO(RcvHdrErr),
+ E_P_AUTO(RcvHdrLenErr),
+ E_P_AUTO(RcvBadTidErr),
+ E_P_AUTO(RcvBadVersionErr),
+ E_P_AUTO(RcvIBFlowErr),
+ E_P_AUTO(RcvEBPErr),
+ E_P_AUTO(RcvUnsupportedVLErr),
+ E_P_AUTO(RcvUnexpectedCharErr),
+ E_P_AUTO(RcvShortPktLenErr),
+ E_P_AUTO(RcvLongPktLenErr),
+ E_P_AUTO(RcvMaxPktLenErr),
+ E_P_AUTO(RcvMinPktLenErr),
+ E_P_AUTO(RcvICRCErr),
+ E_P_AUTO(RcvVCRCErr),
+ E_P_AUTO(RcvFormatErr),
+ { .mask = 0 }
+};
+
+/*
+ * Below generates "auto-message" for interrupts not specific to any port or
+ * context
+ */
+#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
+ .msg = #fldname }
+/* Below generates "auto-message" for interrupts specific to a port */
+#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
+ SYM_LSB(IntMask, fldname##Mask##_0), \
+ SYM_LSB(IntMask, fldname##Mask##_1)), \
+ .msg = #fldname "_P" }
+/* For some reason, the SerDesTrimDone bits are reversed */
+#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
+ SYM_LSB(IntMask, fldname##Mask##_1), \
+ SYM_LSB(IntMask, fldname##Mask##_0)), \
+ .msg = #fldname "_P" }
+/*
+ * Below generates "auto-message" for interrupts specific to a context,
+ * with ctxt-number appended
+ */
+#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
+ SYM_LSB(IntMask, fldname##0IntMask), \
+ SYM_LSB(IntMask, fldname##17IntMask)), \
+ .msg = #fldname "_C"}
+
+static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
+ INTR_AUTO_P(SDmaInt),
+ INTR_AUTO_P(SDmaProgressInt),
+ INTR_AUTO_P(SDmaIdleInt),
+ INTR_AUTO_P(SDmaCleanupDone),
+ INTR_AUTO_C(RcvUrg),
+ INTR_AUTO_P(ErrInt),
+ INTR_AUTO(ErrInt), /* non-port-specific errs */
+ INTR_AUTO(AssertGPIOInt),
+ INTR_AUTO_P(SendDoneInt),
+ INTR_AUTO(SendBufAvailInt),
+ INTR_AUTO_C(RcvAvail),
+ { .mask = 0 }
+};
+
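+/* decode table for the per-port SendHdrErrSymptom register (send header-check failures) */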
+#define TXSYMPTOM_AUTO_P(fldname) \
+ { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname }
+static const struct qib_hwerror_msgs hdrchk_msgs[] = {
+ TXSYMPTOM_AUTO_P(NonKeyPacket),
+ TXSYMPTOM_AUTO_P(GRHFail),
+ TXSYMPTOM_AUTO_P(PkeyFail),
+ TXSYMPTOM_AUTO_P(QPFail),
+ TXSYMPTOM_AUTO_P(SLIDFail),
+ TXSYMPTOM_AUTO_P(RawIPV6),
+ TXSYMPTOM_AUTO_P(PacketTooSmall),
+ { .mask = 0 }
+};
+
+#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
+
+/*
+ * Called when we might have an error that is specific to a particular
+ * PIO buffer, and may need to cancel that buffer so it can be re-used;
+ * we don't need to force an update of pioavail here.
+ */
+static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 i;
+ int any;
+ u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
+ u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ unsigned long sbuf[4];
+
+ /*
+ * It's possible that sendbuffererror could have bits set; might
+ * have already done this as a result of hardware error handling.
+ */
+ any = 0;
+ for (i = 0; i < regcnt; ++i) {
+ sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
+ if (sbuf[i]) {
+ any = 1;
+ qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
+ }
+ }
+
+ if (any)
+ qib_disarm_piobufs_set(dd, sbuf, piobcnt);
+}
+
+/* No txe_recover yet, if ever */
+
+/* No decode__errors yet */
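+/*
+ * Build a comma-separated list of error names for the bits set in "errs",
+ * using the mask/message table "msp".  For multi-bit masks the bit index
+ * within the mask is appended (e.g. "_3"); bits matching no table entry
+ * are shown in hex after "MORE:".
+ */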
+static void err_decode(char *msg, size_t len, u64 errs,
+ const struct qib_hwerror_msgs *msp)
+{
+ u64 these, lmask;
+ int took, multi, n = 0;
+
+ while (msp && msp->mask) {
+ multi = (msp->mask & (msp->mask - 1));
+ while (errs & msp->mask) {
+ these = (errs & msp->mask);
+ lmask = (these & (these - 1)) ^ these;
+ if (len) {
+ if (n++) {
+ /* separate the strings */
+ *msg++ = ',';
+ len--;
+ }
+ took = scnprintf(msg, len, "%s", msp->msg);
+ len -= took;
+ msg += took;
+ }
+ errs &= ~lmask;
+ if (len && multi) {
+ /* More than one bit this mask */
+ int idx = -1;
+
+ while (lmask & msp->mask) {
+ ++idx;
+ lmask >>= 1;
+ }
+ took = scnprintf(msg, len, "_%d", idx);
+ len -= took;
+ msg += took;
+ }
+ }
+ ++msp;
+ }
+ /* If some bits are left, show in hex. */
+ if (len && errs)
+ snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
+ (unsigned long long) errs);
+}
+
+/* only called if r1 set */
+static void flush_fifo(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 __iomem *piobuf;
+ u32 bufn;
+ u32 *hdr;
+ u64 pbc;
+ const unsigned hdrwords = 7;
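+ /* a minimal UD send-only header addressed to the permissive LID */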
+ static struct qib_ib_header ibhdr = {
+ .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
+ .lrh[1] = IB_LID_PERMISSIVE,
+ .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
+ .lrh[3] = IB_LID_PERMISSIVE,
+ .u.oth.bth[0] = cpu_to_be32(
+ (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
+ .u.oth.bth[1] = cpu_to_be32(0),
+ .u.oth.bth[2] = cpu_to_be32(0),
+ .u.oth.u.ud.deth[0] = cpu_to_be32(0),
+ .u.oth.u.ud.deth[1] = cpu_to_be32(0),
+ };
+
+ /*
+ * Send a dummy VL15 packet to flush the launch FIFO.
+ * This will not actually be sent since the TxeBypassIbc bit is set.
+ */
+ pbc = PBC_7322_VL15_SEND |
+ (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
+ (hdrwords + SIZE_OF_CRC);
+ piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
+ if (!piobuf)
+ return;
+ writeq(pbc, piobuf);
+ hdr = (u32 *) &ibhdr;
+ if (dd->flags & QIB_PIO_FLUSH_WC) {
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
+ qib_flush_wc();
+ __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
+ qib_flush_wc();
+ } else
+ qib_pio_copy(piobuf + 2, hdr, hdrwords);
+ qib_sendbuf_done(dd, bufn);
+}
+
+/*
+ * This is called with interrupts disabled and sdma_lock held.
+ */
+static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 set_sendctrl = 0;
+ u64 clr_sendctrl = 0;
+
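+ /* accumulate bits to set and clear, then apply them under sendctrl_lock */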
+ if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
+ set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
+ set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_HALT)
+ set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
+ set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
+ SYM_MASK(SendCtrl_0, TxeAbortIbc) |
+ SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
+ SYM_MASK(SendCtrl_0, TxeAbortIbc) |
+ SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
+
+ spin_lock(&dd->sendctrl_lock);
+
+ /* If we are draining everything, block sends first */
+ if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
+ ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
+ qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ ppd->p_sendctrl |= set_sendctrl;
+ ppd->p_sendctrl &= ~clr_sendctrl;
+
+ if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
+ qib_write_kreg_port(ppd, krp_sendctrl,
+ ppd->p_sendctrl |
+ SYM_MASK(SendCtrl_0, SDmaCleanup));
+ else
+ qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
+ ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
+ qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ spin_unlock(&dd->sendctrl_lock);
+
+ if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
+ flush_fifo(ppd);
+}
+
+static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
+{
+ __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
+}
+
+static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
+{
+ /*
+ * Set SendDmaLenGen and clear and set
+ * the MSB of the generation count to enable generation checking
+ * and load the internal generation counter.
+ */
+ qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
+ qib_write_kreg_port(ppd, krp_senddmalengen,
+ ppd->sdma_descq_cnt |
+ (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
+}
+
+/*
+ * Must be called with sdma_lock held, or before init finished.
+ */
+static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
+{
+ /* Commit writes to memory and advance the tail on the chip */
+ wmb();
+ ppd->sdma_descq_tail = tail;
+ qib_write_kreg_port(ppd, krp_senddmatail, tail);
+}
+
+/*
+ * This is called with interrupts disabled and sdma_lock held.
+ */
+static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
+{
+ /*
+ * Drain all FIFOs.
+ * The hardware doesn't require this but we do it so that verbs
+ * and user applications don't wait for link active to send stale
+ * data.
+ */
+ sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
+
+ qib_sdma_7322_setlengen(ppd);
+ qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
+ ppd->sdma_head_dma[0] = 0;
+ qib_7322_sdma_sendctrl(ppd,
+ ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
+}
+
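+/* SDMA errors that disable the engine: QIB_E_P_SDMAERRS minus SDmaUnexpData */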
+#define DISABLES_SDMA ( \
+ QIB_E_P_SDMAHALT | \
+ QIB_E_P_SDMADESCADDRMISALIGN | \
+ QIB_E_P_SDMAMISSINGDW | \
+ QIB_E_P_SDMADWEN | \
+ QIB_E_P_SDMARPYTAG | \
+ QIB_E_P_SDMA1STDESC | \
+ QIB_E_P_SDMABASE | \
+ QIB_E_P_SDMATAILOUTOFBOUND | \
+ QIB_E_P_SDMAOUTOFBOUND | \
+ QIB_E_P_SDMAGENMISMATCH)
+
+static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
+{
+ unsigned long flags;
+ struct qib_devdata *dd = ppd->dd;
+
+ errs &= QIB_E_P_SDMAERRS;
+
+ if (errs & QIB_E_P_SDMAUNEXPDATA)
+ qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
+ ppd->port);
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ switch (ppd->sdma_state.current_state) {
+ case qib_sdma_state_s00_hw_down:
+ break;
+
+ case qib_sdma_state_s10_hw_start_up_wait:
+ if (errs & QIB_E_P_SDMAHALT)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e20_hw_started);
+ break;
+
+ case qib_sdma_state_s20_idle:
+ break;
+
+ case qib_sdma_state_s30_sw_clean_up_wait:
+ break;
+
+ case qib_sdma_state_s40_hw_clean_up_wait:
+ if (errs & QIB_E_P_SDMAHALT)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e50_hw_cleaned);
+ break;
+
+ case qib_sdma_state_s50_hw_halt_wait:
+ if (errs & QIB_E_P_SDMAHALT)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e60_hw_halted);
+ break;
+
+ case qib_sdma_state_s99_running:
+ __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
+ __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
+ break;
+ }
+
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+/*
+ * handle per-device errors (not per-port errors)
+ */
+static noinline void handle_7322_errors(struct qib_devdata *dd)
+{
+ char *msg;
+ u64 iserr = 0;
+ u64 errs;
+ u64 mask;
+ int log_idx;
+
+ qib_stats.sps_errints++;
+ errs = qib_read_kreg64(dd, kr_errstatus);
+ if (!errs) {
+ qib_devinfo(dd->pcidev, "device error interrupt, "
+ "but no error bits set!\n");
+ goto done;
+ }
+
+ /* don't report errors that are masked */
+ errs &= dd->cspec->errormask;
+ msg = dd->cspec->emsgbuf;
+
+ /* do these first, they are most important */
+ if (errs & QIB_E_HARDWARE) {
+ *msg = '\0';
+ qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+ } else
+ for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+ if (errs & dd->eep_st_masks[log_idx].errs_to_log)
+ qib_inc_eeprom_err(dd, log_idx, 1);
+
+ if (errs & QIB_E_SPKTERRS) {
+ qib_disarm_7322_senderrbufs(dd->pport);
+ qib_stats.sps_txerrs++;
+ } else if (errs & QIB_E_INVALIDADDR)
+ qib_stats.sps_txerrs++;
+ else if (errs & QIB_E_ARMLAUNCH) {
+ qib_stats.sps_txerrs++;
+ qib_disarm_7322_senderrbufs(dd->pport);
+ }
+ qib_write_kreg(dd, kr_errclear, errs);
+
+ /*
+ * The ones we mask off are handled specially below
+ * or above. Also mask SDMADISABLED by default as it
+ * is too chatty.
+ */
+ mask = QIB_E_HARDWARE;
+ *msg = '\0';
+
+ err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
+ qib_7322error_msgs);
+
+ /*
+ * Getting reset is a tragedy for all ports. Mark the device
+ * _and_ the ports as "offline" in way meaningful to each.
+ */
+ if (errs & QIB_E_RESET) {
+ int pidx;
+
+ qib_dev_err(dd, "Got reset, requires re-init "
+ "(unload and reload driver)\n");
+ dd->flags &= ~QIB_INITTED; /* needs re-init */
+ /* mark as having had error */
+ *dd->devstatusp |= QIB_STATUS_HWERROR;
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ if (dd->pport[pidx].link_speed_supported)
+ *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
+ }
+
+ if (*msg && iserr)
+ qib_dev_err(dd, "%s error\n", msg);
+
+ /*
+ * If there were hdrq or egrfull errors, wake up any processes
+ * waiting in poll. We used to try to check which contexts had
+ * the overflow, but given the cost of that and the chip reads
+ * to support it, it's better to just wake everybody up if we
+ * get an overflow; waiters can poll again if it's not them.
+ */
+ if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
+ qib_handle_urcv(dd, ~0U);
+ if (errs & ERR_MASK(RcvEgrFullErr))
+ qib_stats.sps_buffull++;
+ else
+ qib_stats.sps_hdrfull++;
+ }
+
+done:
+ return;
+}
+
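+/* chase_timer callback: resume polling for the link after the hold-off period */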
+static void reenable_chase(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
+ ppd->cpspec->chase_timer.expires = 0;
+ qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
+ QLOGIC_IB_IBCC_LINKINITCMD_POLL);
+}
+
+static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
+{
+ ppd->cpspec->chase_end = 0;
+
+ if (!qib_chase)
+ return;
+
+ qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
+ QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+ ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
+ add_timer(&ppd->cpspec->chase_timer);
+}
+
+static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
+{
+ u8 ibclt;
+ u64 tnow;
+
+ ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
+
+ /*
+ * Detect and handle the state chase issue, where we can
+ * get stuck if we are unlucky on timing on both sides of
+ * the link. If we are, we disable, set a timer, and
+ * then re-enable.
+ */
+ switch (ibclt) {
+ case IB_7322_LT_STATE_CFGRCVFCFG:
+ case IB_7322_LT_STATE_CFGWAITRMT:
+ case IB_7322_LT_STATE_TXREVLANES:
+ case IB_7322_LT_STATE_CFGENH:
+ tnow = get_jiffies_64();
+ if (ppd->cpspec->chase_end &&
+ time_after64(tnow, ppd->cpspec->chase_end))
+ disable_chase(ppd, tnow, ibclt);
+ else if (!ppd->cpspec->chase_end)
+ ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
+ break;
+ default:
+ ppd->cpspec->chase_end = 0;
+ break;
+ }
+
+ if (ibclt == IB_7322_LT_STATE_CFGTEST &&
+ (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
+ force_h1(ppd);
+ ppd->cpspec->qdr_reforce = 1;
+ } else if (ppd->cpspec->qdr_reforce &&
+ (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
+ (ibclt == IB_7322_LT_STATE_CFGENH ||
+ ibclt == IB_7322_LT_STATE_CFGIDLE ||
+ ibclt == IB_7322_LT_STATE_LINKUP))
+ force_h1(ppd);
+
+ if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
+ ppd->link_speed_enabled == QIB_IB_QDR &&
+ (ibclt == IB_7322_LT_STATE_CFGTEST ||
+ ibclt == IB_7322_LT_STATE_CFGENH ||
+ (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
+ ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
+ adj_tx_serdes(ppd);
+
+ if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP &&
+ ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
+ ppd->cpspec->qdr_dfe_on = 1;
+ ppd->cpspec->qdr_dfe_time = 0;
+ /* On link down, reenable QDR adaptation */
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+ ppd->dd->cspec->r1 ?
+ QDR_STATIC_ADAPT_DOWN_R1 :
+ QDR_STATIC_ADAPT_DOWN);
+ }
+}
+
+/*
+ * This is per-pport error handling; it will likely get its own MSIx
+ * interrupt (one for each port, although with just a single handler).
+ */
+static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
+{
+ char *msg;
+ u64 ignore_this_time = 0, iserr = 0, errs, fmask;
+ struct qib_devdata *dd = ppd->dd;
+
+ /* do this as soon as possible */
+ fmask = qib_read_kreg64(dd, kr_act_fmask);
+ if (!fmask)
+ check_7322_rxe_status(ppd);
+
+ errs = qib_read_kreg_port(ppd, krp_errstatus);
+ if (!errs)
+ qib_devinfo(dd->pcidev,
+ "Port%d error interrupt, but no error bits set!\n",
+ ppd->port);
+ if (!fmask)
+ errs &= ~QIB_E_P_IBSTATUSCHANGED;
+ if (!errs)
+ goto done;
+
+ msg = ppd->cpspec->epmsgbuf;
+ *msg = '\0';
+
+ if (errs & ~QIB_E_P_BITSEXTANT) {
+ err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+ errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
+ if (!*msg)
+ snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
+ "no others");
+ qib_dev_porterr(dd, ppd->port, "error interrupt with unknown"
+ " errors 0x%016Lx set (and %s)\n",
+ (errs & ~QIB_E_P_BITSEXTANT), msg);
+ *msg = '\0';
+ }
+
+ if (errs & QIB_E_P_SHDR) {
+ u64 symptom;
+
+ /* determine cause, then write to clear */
+ symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
+ qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
+ err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
+ hdrchk_msgs);
+ *msg = '\0';
+ /* senderrbuf cleared in SPKTERRS below */
+ }
+
+ if (errs & QIB_E_P_SPKTERRS) {
+ if ((errs & QIB_E_P_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when trying to bring the link
+ * up, but the IB link changes state at the "wrong"
+ * time. The IB logic then complains that the packet
+ * isn't valid. We don't want to confuse people, so
+ * we just don't print them, except at debug
+ */
+ err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+ (errs & QIB_E_P_LINK_PKTERRS),
+ qib_7322p_error_msgs);
+ *msg = '\0';
+ ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
+ }
+ qib_disarm_7322_senderrbufs(ppd);
+ } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when SMA is trying to bring the link
+ * up, but the IB link changes state at the "wrong" time.
+ * The IB logic then complains that the packet isn't
+ * valid. We don't want to confuse people, so we just
+ * don't print them, except at debug
+ */
+ err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
+ qib_7322p_error_msgs);
+ ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
+ *msg = '\0';
+ }
+
+ qib_write_kreg_port(ppd, krp_errclear, errs);
+
+ errs &= ~ignore_this_time;
+ if (!errs)
+ goto done;
+
+ if (errs & QIB_E_P_RPKTERRS)
+ qib_stats.sps_rcverrs++;
+ if (errs & QIB_E_P_SPKTERRS)
+ qib_stats.sps_txerrs++;
+
+ iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
+
+ if (errs & QIB_E_P_SDMAERRS)
+ sdma_7322_p_errors(ppd, errs);
+
+ if (errs & QIB_E_P_IBSTATUSCHANGED) {
+ u64 ibcs;
+ u8 ltstate;
+
+ ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
+ ltstate = qib_7322_phys_portstate(ibcs);
+
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ handle_serdes_issues(ppd, ibcs);
+ if (!(ppd->cpspec->ibcctrl_a &
+ SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
+ /*
+ * We got our interrupt, so init code should be
+ * happy and not try alternatives. Now squelch
+ * other "chatter" from link-negotiation (pre Init)
+ */
+ ppd->cpspec->ibcctrl_a |=
+ SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ }
+
+ /* Update our picture of width and speed from chip */
+ ppd->link_width_active =
+ (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
+ IB_WIDTH_4X : IB_WIDTH_1X;
+ ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
+ LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
+ SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
+ QIB_IB_DDR : QIB_IB_SDR;
+
+ if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
+ IB_PHYSPORTSTATE_DISABLED)
+ qib_set_ib_7322_lstate(ppd, 0,
+ QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+ else
+ /*
+ * Since going into a recovery state causes the link
+ * state to go down and since recovery is transitory,
+ * it is better if we "miss" ever seeing the link
+ * training state go into recovery (i.e., ignore this
+ * transition for link state special handling purposes)
+ * without updating lastibcstat.
+ */
+ if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
+ ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
+ ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
+ ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
+ qib_handle_e_ibstatuschanged(ppd, ibcs);
+ }
+ if (*msg && iserr)
+ qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
+
+ if (ppd->state_wanted & ppd->lflags)
+ wake_up_interruptible(&ppd->state_wait);
+done:
+ return;
+}
+
+/* enable/disable chip from delivering interrupts */
+static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ if (dd->flags & QIB_BADINTR)
+ return;
+ qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
+ /* cause any pending enabled interrupts to be re-delivered */
+ qib_write_kreg(dd, kr_intclear, 0ULL);
+ if (dd->cspec->num_msix_entries) {
+ /* and same for MSIx */
+ u64 val = qib_read_kreg64(dd, kr_intgranted);
+ if (val)
+ qib_write_kreg(dd, kr_intgranted, val);
+ }
+ } else
+ qib_write_kreg(dd, kr_intmask, 0ULL);
+}
+
+/*
+ * Try to cleanup as much as possible for anything that might have gone
+ * wrong while in freeze mode, such as pio buffers being written by user
+ * processes (causing armlaunch), send errors due to going into freeze mode,
+ * etc., and try to avoid causing extra interrupts while doing so.
+ * Forcibly update the in-memory pioavail register copies after cleanup
+ * because the chip won't do it while in freeze mode (the register values
+ * themselves are kept correct).
+ * Make sure that we don't lose any important interrupts by using the chip
+ * feature that says that writing 0 to a bit in *clear that is set in
+ * *status will cause an interrupt to be generated again (if allowed by
+ * the *mask value).
+ * This is in chip-specific code because of all of the register accesses,
+ * even though the details are similar on most chips.
+ */
+static void qib_7322_clear_freeze(struct qib_devdata *dd)
+{
+ int pidx;
+
+ /* disable error interrupts, to avoid confusion */
+ qib_write_kreg(dd, kr_errmask, 0ULL);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ if (dd->pport[pidx].link_speed_supported)
+ qib_write_kreg_port(dd->pport + pidx, krp_errmask,
+ 0ULL);
+
+ /* also disable interrupts; errormask is sometimes overwritten */
+ qib_7322_set_intr_state(dd, 0);
+
+ /* clear the freeze, and be sure chip saw it */
+ qib_write_kreg(dd, kr_control, dd->control);
+ qib_read_kreg32(dd, kr_scratch);
+
+ /*
+ * Force new interrupt if any hwerr, error or interrupt bits are
+ * still set, and clear "safe" send packet errors related to freeze
+ * and cancelling sends. Re-enable error interrupts before possible
+ * force of re-interrupt on pending interrupts.
+ */
+ qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+ qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+ /* We need to purge per-port errs and reset mask, too */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ if (!dd->pport[pidx].link_speed_supported)
+ continue;
+ qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
+ qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
+ }
+ qib_7322_set_intr_state(dd, 1);
+}
+
+/* no error handling to speak of */
+/**
+ * qib_7322_handle_hwerrors - display hardware errors.
+ * @dd: the qlogic_ib device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Most hardware errors are catastrophic, but for right now we'll print
+ * them and continue. We reuse the same message buffer as
+ * qib_handle_errors() to avoid excessive stack usage.
+ */
+static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
+ size_t msgl)
+{
+ u64 hwerrs;
+ u32 ctrl;
+ int isfatal = 0;
+
+ hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+ if (!hwerrs)
+ goto bail;
+ if (hwerrs == ~0ULL) {
+ qib_dev_err(dd, "Read of hardware error status failed "
+ "(all bits set); ignoring\n");
+ goto bail;
+ }
+ qib_stats.sps_hwerrs++;
+
+ /* Always clear the error status register, except BIST fail */
+ qib_write_kreg(dd, kr_hwerrclear, hwerrs &
+ ~HWE_MASK(PowerOnBISTFailed));
+
+ hwerrs &= dd->cspec->hwerrmask;
+
+ /* no EEPROM logging, yet */
+
+ if (hwerrs)
+ qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
+ "(cleared)\n", (unsigned long long) hwerrs);
+
+ ctrl = qib_read_kreg32(dd, kr_control);
+ if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
+ /*
+ * No recovery yet...
+ */
+ if ((hwerrs & ~HWE_MASK(LATriggered)) ||
+ dd->cspec->stay_in_freeze) {
+ /*
+ * If any bits are set that we aren't ignoring, only make the
+ * complaint once, in case it's stuck or recurring and we get
+ * here multiple times. Force the link down, so the switch
+ * knows, and the LEDs are turned off.
+ */
+ if (dd->flags & QIB_INITTED)
+ isfatal = 1;
+ } else
+ qib_7322_clear_freeze(dd);
+ }
+
+ if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
+ isfatal = 1;
+ strlcpy(msg, "[Memory BIST test failed, "
+ "InfiniPath hardware unusable]", msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
+
+ /* Ignore esoteric PLL failures et al. */
+
+ qib_dev_err(dd, "%s hardware error\n", msg);
+
+ if (isfatal && !dd->diag_client) {
+ qib_dev_err(dd, "Fatal Hardware Error, no longer"
+ " usable, SN %.16s\n", dd->serial);
+ /*
+ * for /sys status file and user programs to print; if no
+ * trailing brace is copied, we'll know it was truncated.
+ */
+ if (dd->freezemsg)
+ snprintf(dd->freezemsg, dd->freezelen,
+ "{%s}", msg);
+ qib_disable_after_error(dd);
+ }
+bail:;
+}
+
+/**
+ * qib_7322_init_hwerrors - enable hardware errors
+ * @dd: the qlogic_ib device
+ *
+ * Now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask.
+ */
+static void qib_7322_init_hwerrors(struct qib_devdata *dd)
+{
+ int pidx;
+ u64 extsval;
+
+ extsval = qib_read_kreg64(dd, kr_extstatus);
+ if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
+ QIB_EXTS_MEMBIST_ENDTEST)))
+ qib_dev_err(dd, "MemBIST did not complete!\n");
+
+ /* never clear BIST failure, so reported on each driver load */
+ qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+
+ /* clear all */
+ qib_write_kreg(dd, kr_errclear, ~0ULL);
+ /* enable errors that are masked, at least this first time. */
+ qib_write_kreg(dd, kr_errmask, ~0ULL);
+ dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ if (dd->pport[pidx].link_speed_supported)
+ qib_write_kreg_port(dd->pport + pidx, krp_errmask,
+ ~0ULL);
+}
+
+/*
+ * Disable and enable the armlaunch error. Used for PIO bandwidth testing
+ * on chips that are count-based, rather than trigger-based. There is no
+ * reference counting, but that's also fine, given the intended use.
+ * Only chip-specific because it's all register accesses
+ */
+static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
+ dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
+ } else
+ dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
+/*
+ * Formerly took parameter <which> in pre-shifted,
+ * pre-merged form with LinkCmd and LinkInitCmd
+ * together, and assumed that zero was a NOP.
+ */
+static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
+ u16 linitcmd)
+{
+ u64 mod_wd;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
+ /*
+ * If we are told to disable, note that so link-recovery
+ * code does not attempt to bring us back up.
+ * Also reset everything that we can, so we start
+ * completely clean when re-enabled (before we
+ * actually issue the disable to the IBC)
+ */
+ qib_7322_mini_pcs_reset(ppd);
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
+ /*
+ * Any other linkinitcmd will lead to LINKDOWN and then
+ * to INIT (if all is well), so clear flag to let
+ * link-recovery code attempt to bring us back up.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ /*
+ * Clear status change interrupt reduction so the
+ * new state is seen.
+ */
+ ppd->cpspec->ibcctrl_a &=
+ ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
+ }
+
+ mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
+ (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
+ mod_wd);
+ /* write to chip to prevent back-to-back writes of ibc reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+
+}
+
+/*
+ * The total RCV buffer memory is 64KB, used for both ports, and is
+ * in units of 64 bytes (same as IB flow control credit unit).
+ * The consumedVL unit in the same registers are in 32 byte units!
+ * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
+ * and we can therefore allocate just 9 IB credits for 2 VL15 packets
+ * in krp_rxcreditvl15, rather than 10.
+ */
+#define RCV_BUF_UNITSZ 64
+#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
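+/* e.g. with both ports in use: (64 * 1024) / (64 * 2) = 512 units per port */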
+
+static void set_vls(struct qib_pportdata *ppd)
+{
+ int i, numvls, totcred, cred_vl, vl0extra;
+ struct qib_devdata *dd = ppd->dd;
+ u64 val;
+
+ numvls = qib_num_vls(ppd->vls_operational);
+
+ /*
+ * Set up per-VL credits. Below is a kluge based on these assumptions:
+ * 1) port is disabled at the time early_init is called.
+ * 2) give VL15 17 credits, for two max-plausible packets.
+ * 3) Give VL0-N the rest, with any rounding excess used for VL0
+ */
+ /* 2 VL15 packets @ 288 bytes each (including IB headers) */
+ totcred = NUM_RCV_BUF_UNITS(dd);
+ cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
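+ /* (2 * 288 + 63) / 64 = 9 buffer units, i.e. 9 IB credits for the two VL15 packets */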
+ totcred -= cred_vl;
+ qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
+ cred_vl = totcred / numvls;
+ vl0extra = totcred - cred_vl * numvls;
+ qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
+ for (i = 1; i < numvls; i++)
+ qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
+ for (; i < 8; i++) /* no buffer space for other VLs */
+ qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
+
+ /* Notify IBC that credits need to be recalculated */
+ val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
+ val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
+ qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
+ qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
+
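+ /* dummy reads (values unused), presumably to flush the credit writes to the chip */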
+ for (i = 0; i < numvls; i++)
+ val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
+ val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
+
+ /* Change the number of operational VLs */
+ ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
+ ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
+ ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+}
+
+/*
+ * The code that deals with actual SerDes is in serdes_7322_init().
+ * Compared to the code for iba7220, it is minimal.
+ */
+static int serdes_7322_init(struct qib_pportdata *ppd);
+
+/**
+ * qib_7322_bringup_serdes - bring up the serdes
+ * @ppd: physical port on the qlogic_ib device
+ */
+static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 val, guid, ibc;
+ unsigned long flags;
+ int ret = 0;
+
+ /*
+ * SerDes model not in Pd, but still need to
+ * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
+ * eventually.
+ */
+ /* Put IBC in reset, sends disabled (should be in reset already) */
+ ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+
+ if (qib_compat_ddr_negotiate) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
+ crp_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
+ crp_iblinkerrrecov);
+ }
+
+ /* flowcontrolwatermark is in units of KBytes */
+ ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
+ /*
+ * Flow control is sent this often, even if no changes in
+ * buffer space occur. Units are 128ns for this chip.
+ * Set to 3usec.
+ */
+ ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
+ /* max error tolerance */
+ ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
+ /* IB credit flow control. */
+ ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
+ /*
+ * set initial max size pkt IBC will send, including ICRC; it's the
+ * PIO buffer size in dwords, less 1; also see qib_set_mtu()
+ */
+ ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
+ SYM_LSB(IBCCtrlA_0, MaxPktLen);
+ ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
+
+ /* initially come up waiting for TS1, without sending anything. */
+ val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+ QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+ /*
+ * Reset the PCS interface to the serdes (and also ibc, which is still
+ * in reset from above). Writes new value of ibcctrl_a as last step.
+ */
+ qib_7322_mini_pcs_reset(ppd);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+
+ if (!ppd->cpspec->ibcctrl_b) {
+ unsigned lse = ppd->link_speed_enabled;
+
+ /*
+ * Not on re-init after reset, establish shadow
+ * and force initial config.
+ */
+ ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
+ krp_ibcctrl_b);
+ ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
+ IBA7322_IBC_SPEED_DDR |
+ IBA7322_IBC_SPEED_SDR |
+ IBA7322_IBC_WIDTH_AUTONEG |
+ SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
+ if (lse & (lse - 1)) /* Multiple speeds enabled */
+ ppd->cpspec->ibcctrl_b |=
+ (lse << IBA7322_IBC_SPEED_LSB) |
+ IBA7322_IBC_IBTA_1_2_MASK |
+ IBA7322_IBC_MAX_SPEED_MASK;
+ else
+ ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
+ IBA7322_IBC_SPEED_QDR |
+ IBA7322_IBC_IBTA_1_2_MASK :
+ (lse == QIB_IB_DDR) ?
+ IBA7322_IBC_SPEED_DDR :
+ IBA7322_IBC_SPEED_SDR;
+ if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
+ (IB_WIDTH_1X | IB_WIDTH_4X))
+ ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
+ else
+ ppd->cpspec->ibcctrl_b |=
+ ppd->link_width_enabled == IB_WIDTH_4X ?
+ IBA7322_IBC_WIDTH_4X_ONLY :
+ IBA7322_IBC_WIDTH_1X_ONLY;
+
+ /* always enable these on driver reload, not sticky */
+ ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
+ IBA7322_IBC_HRTBT_MASK);
+ }
+ qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
+
+ /* setup so we have more time at CFGTEST to change H1 */
+ val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
+ val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
+ val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
+ qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
+
+ serdes_7322_init(ppd);
+
+ guid = be64_to_cpu(ppd->guid);
+ if (!guid) {
+ if (dd->base_guid)
+ guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
+ ppd->guid = cpu_to_be64(guid);
+ }
+
+ qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
+ /* write to chip to prevent back-to-back writes of ibc reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ /* Enable port */
+ ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
+ set_vls(ppd);
+
+ /* be paranoid against later code motion, etc. */
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+ ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
+ qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+ /* Also enable IBSTATUSCHG interrupt. */
+ val = qib_read_kreg_port(ppd, krp_errmask);
+ qib_write_kreg_port(ppd, krp_errmask,
+ val | ERR_MASK_N(IBStatusChanged));
+
+ /* Always zero until we start messing with SerDes for real */
+ return ret;
+}
+
+/**
+ * qib_7322_mini_quiet_serdes - set serdes to txidle
+ * @ppd: physical port on the qlogic_ib device
+ * Called when driver is being unloaded
+ */
+static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
+{
+ u64 val;
+ unsigned long flags;
+
+ qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ wake_up(&ppd->cpspec->autoneg_wait);
+ cancel_delayed_work(&ppd->cpspec->autoneg_work);
+ if (ppd->dd->cspec->r1)
+ cancel_delayed_work(&ppd->cpspec->ipg_work);
+ flush_scheduled_work();
+
+ ppd->cpspec->chase_end = 0;
+ if (ppd->cpspec->chase_timer.data) /* if initted */
+ del_timer_sync(&ppd->cpspec->chase_timer);
+
+ /*
+ * Despite the name, actually disables IBC as well. Do it when
+ * we are as sure as possible that no more packets can be
+ * received, following the down and the PCS reset.
+ * The actual disabling happens in qib_7322_mini_pcs_reset(),
+ * along with the PCS being reset.
+ */
+ ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
+ qib_7322_mini_pcs_reset(ppd);
+
+ /*
+ * Update the adjusted counters so the adjustment persists
+ * across driver reload.
+ */
+ if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
+ ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
+ struct qib_devdata *dd = ppd->dd;
+ u64 diagc;
+
+ /* enable counter writes */
+ diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
+ qib_write_kreg(dd, kr_hwdiagctrl,
+ diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
+
+ if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
+ val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
+ if (ppd->cpspec->ibdeltainprog)
+ val -= val - ppd->cpspec->ibsymsnap;
+ val -= ppd->cpspec->ibsymdelta;
+ write_7322_creg_port(ppd, crp_ibsymbolerr, val);
+ }
+ if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
+ val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
+ if (ppd->cpspec->ibdeltainprog)
+ val -= val - ppd->cpspec->iblnkerrsnap;
+ val -= ppd->cpspec->iblnkerrdelta;
+ write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
+ }
+ if (ppd->cpspec->iblnkdowndelta) {
+ val = read_7322_creg32_port(ppd, crp_iblinkdown);
+ val += ppd->cpspec->iblnkdowndelta;
+ write_7322_creg_port(ppd, crp_iblinkdown, val);
+ }
+ /*
+ * No need to save ibmalfdelta since IB perfcounters
+ * are cleared on driver reload.
+ */
+
+ /* and disable counter writes */
+ qib_write_kreg(dd, kr_hwdiagctrl, diagc);
+ }
+}
+
+/**
+ * qib_setup_7322_setextled - set the state of the two external LEDs
+ * @ppd: physical port on the qlogic_ib device
+ * @on: whether the link is up or not
+ *
+ * When @on is true, the exact combination of LEDs is determined by
+ * looking at the ibcstatus.
+ *
+ * These LEDs indicate the physical and logical state of IB link.
+ * For this chip (at least with recommended board pinouts), LED1
+ * is Yellow (logical state) and LED2 is Green (physical state),
+ *
+ * Note: We try to match the Mellanox HCA LED behavior as best
+ * we can. Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate. That's
+ * visible overhead, so not something we will do.
+ */
+static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 extctl, ledblink = 0, val;
+ unsigned long flags;
+ int yel, grn;
+
+ /*
+ * The diags use the LED to indicate diag info, so we leave
+ * the external LED alone when the diags are running.
+ */
+ if (dd->diag_client)
+ return;
+
+ /* Allow override of LED display for, e.g., locating a system in a rack */
+ if (ppd->led_override) {
+ grn = (ppd->led_override & QIB_LED_PHYS);
+ yel = (ppd->led_override & QIB_LED_LOG);
+ } else if (on) {
+ val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
+ grn = qib_7322_phys_portstate(val) ==
+ IB_PHYSPORTSTATE_LINKUP;
+ yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
+ } else {
+ grn = 0;
+ yel = 0;
+ }
+
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ extctl = dd->cspec->extctrl & (ppd->port == 1 ?
+ ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
+ if (grn) {
+ extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
+ /*
+ * Counts are in chip clock (4ns) periods.
+ * This is 1/16 sec (66.6ms) on,
+ * 3/16 sec (187.5 ms) off, with packets rcvd.
+ */
+ ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
+ ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
+ }
+ if (yel)
+ extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
+ dd->cspec->extctrl = extctl;
+ qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+
+ if (ledblink) /* blink the LED on packet receive */
+ qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
+}
+
+/*
+ * Disable MSIx interrupt if enabled, call generic MSIx code
+ * to cleanup, and clear pending MSIx interrupts.
+ * Used for fallback to INTx, after reset, and when MSIx setup fails.
+ */
+static void qib_7322_nomsix(struct qib_devdata *dd)
+{
+ u64 intgranted;
+ int n;
+
+ dd->cspec->main_int_mask = ~0ULL;
+ n = dd->cspec->num_msix_entries;
+ if (n) {
+ int i;
+
+ dd->cspec->num_msix_entries = 0;
+ for (i = 0; i < n; i++)
+ free_irq(dd->cspec->msix_entries[i].vector,
+ dd->cspec->msix_arg[i]);
+ qib_nomsix(dd);
+ }
+ /* make sure no MSIx interrupts are left pending */
+ intgranted = qib_read_kreg64(dd, kr_intgranted);
+ if (intgranted)
+ qib_write_kreg(dd, kr_intgranted, intgranted);
+}
+
+static void qib_7322_free_irq(struct qib_devdata *dd)
+{
+ if (dd->cspec->irq) {
+ free_irq(dd->cspec->irq, dd);
+ dd->cspec->irq = 0;
+ }
+ qib_7322_nomsix(dd);
+}
+
+static void qib_setup_7322_cleanup(struct qib_devdata *dd)
+{
+ int i;
+
+ qib_7322_free_irq(dd);
+ kfree(dd->cspec->cntrs);
+ kfree(dd->cspec->sendchkenable);
+ kfree(dd->cspec->sendgrhchk);
+ kfree(dd->cspec->sendibchk);
+ kfree(dd->cspec->msix_entries);
+ kfree(dd->cspec->msix_arg);
+ for (i = 0; i < dd->num_pports; i++) {
+ unsigned long flags;
+ u32 mask = QSFP_GPIO_MOD_PRS_N |
+ (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
+
+ kfree(dd->pport[i].cpspec->portcntrs);
+ if (dd->flags & QIB_HAS_QSFP) {
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ dd->cspec->gpio_mask &= ~mask;
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+ qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
+ }
+ if (dd->pport[i].ibport_data.smi_ah)
+ ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
+ }
+}
+
+/* handle SDMA interrupts */
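+/*
+ * SDma/SDmaIdle/SDmaProgress are per-port bits in IntStatus;
+ * SDmaCleanupDone is reported through the separate "PM" mask.
+ */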
+static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
+{
+ struct qib_pportdata *ppd0 = &dd->pport[0];
+ struct qib_pportdata *ppd1 = &dd->pport[1];
+ u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
+ INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
+ u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
+ INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
+
+ if (intr0)
+ qib_sdma_intr(ppd0);
+ if (intr1)
+ qib_sdma_intr(ppd1);
+
+ if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
+ qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
+ if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
+ qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
+}
+
+/*
+ * Set or clear the Send buffer available interrupt enable bit.
+ */
+static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (needint)
+ dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
+ else
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+}
+
+/*
+ * Somehow got an interrupt with reserved bits set in interrupt status.
+ * Print a message so we know it happened, then clear them.
+ * Kept out of line to keep the mainline interrupt handler cache-friendly.
+ */
+static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
+{
+ u64 kills;
+
+ kills = istat & ~QIB_I_BITSEXTANT;
+ qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx\n",
+ (unsigned long long) kills);
+ qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
+}
+
+/* keep mainline interrupt handler cache-friendly */
+static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
+{
+ u32 gpiostatus;
+ int handled = 0;
+ int pidx;
+
+ /*
+ * Boards for this chip currently don't use GPIO interrupts,
+ * so clear by writing GPIOstatus to GPIOclear, and complain
+ * to developer. To avoid endless repeats, clear
+ * the bits in the mask, since there is some kind of
+ * programming error or chip problem.
+ */
+ gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
+ /*
+ * In theory, writing GPIOstatus to GPIOclear could
+ * have a bad side-effect on some diagnostic that wanted
+ * to poll for a status-change, but the various shadows
+ * make that problematic at best. Diags will just suppress
+ * all GPIO interrupts during such tests.
+ */
+ qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
+ /*
+ * Check for QSFP MOD_PRS changes
+ * only works for single port if IB1 != pidx1
+ */
+ for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
+ ++pidx) {
+ struct qib_pportdata *ppd;
+ struct qib_qsfp_data *qd;
+ u32 mask;
+ if (!dd->pport[pidx].link_speed_supported)
+ continue;
+ mask = QSFP_GPIO_MOD_PRS_N;
+ ppd = dd->pport + pidx;
+ mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
+ if (gpiostatus & dd->cspec->gpio_mask & mask) {
+ u64 pins;
+ qd = &ppd->cpspec->qsfp_data;
+ gpiostatus &= ~mask;
+ pins = qib_read_kreg64(dd, kr_extstatus);
+ pins >>= SYM_LSB(EXTStatus, GPIOIn);
+ if (!(pins & mask)) {
+ ++handled;
+ qd->t_insert = get_jiffies_64();
+ schedule_work(&qd->work);
+ }
+ }
+ }
+
+ if (gpiostatus && !handled) {
+ const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
+ u32 gpio_irq = mask & gpiostatus;
+
+ /*
+ * Clear any troublemakers, and update chip from shadow
+ */
+ dd->cspec->gpio_mask &= ~gpio_irq;
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ }
+}
+
+/*
+ * Handle errors and unusual events first, separate function
+ * to improve cache hits for fast path interrupt handling.
+ */
+static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
+{
+ if (istat & ~QIB_I_BITSEXTANT)
+ unknown_7322_ibits(dd, istat);
+ if (istat & QIB_I_GPIO)
+ unknown_7322_gpio_intr(dd);
+ if (istat & QIB_I_C_ERROR)
+ handle_7322_errors(dd);
+ if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
+ handle_7322_p_errors(dd->rcd[0]->ppd);
+ if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
+ handle_7322_p_errors(dd->rcd[1]->ppd);
+}
+
+/*
+ * Dynamically adjust the rcv int timeout for a context based on incoming
+ * packet rate.
+ */
+static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
+{
+ struct qib_devdata *dd = rcd->dd;
+ u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
+
+ /*
+ * Dynamically adjust idle timeout on chip
+ * based on number of packets processed.
+ */
+ if (npkts < rcv_int_count && timeout > 2)
+ timeout >>= 1;
+ else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
+ timeout = min(timeout << 1, rcv_int_timeout);
+ else
+ return;
+
+ dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
+ qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
+}
+
+/*
+ * This is the main interrupt handler.
+ * It will normally only be used for low frequency interrupts but may
+ * have to handle all interrupts if INTx is enabled or fewer than normal
+ * MSIx interrupts were allocated.
+ * This routine should ignore the interrupt bits for any of the
+ * dedicated MSIx handlers.
+ */
+static irqreturn_t qib_7322intr(int irq, void *data)
+{
+ struct qib_devdata *dd = data;
+ irqreturn_t ret;
+ u64 istat;
+ u64 ctxtrbits;
+ u64 rmask;
+ unsigned i;
+ u32 npkts;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ ret = IRQ_HANDLED;
+ goto bail;
+ }
+
+ istat = qib_read_kreg64(dd, kr_intstatus);
+
+ if (unlikely(istat == ~0ULL)) {
+ qib_bad_intrstatus(dd);
+ qib_dev_err(dd, "Interrupt status all f's, skipping\n");
+ /* don't know if it was our interrupt or not */
+ ret = IRQ_NONE;
+ goto bail;
+ }
+
+ istat &= dd->cspec->main_int_mask;
+ if (unlikely(!istat)) {
+ /* already handled, or shared and not us */
+ ret = IRQ_NONE;
+ goto bail;
+ }
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* handle "errors" of various kinds first, device ahead of port */
+ if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
+ QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
+ INT_MASK_P(Err, 1))))
+ unlikely_7322_intr(dd, istat);
+
+ /*
+ * Clear the interrupt bits we found set, relatively early, so we
+ * "know" know the chip will have seen this by the time we process
+ * the queue, and will re-interrupt if necessary. The processor
+ * itself won't take the interrupt again until we return.
+ */
+ qib_write_kreg(dd, kr_intclear, istat);
+
+ /*
+ * Handle kernel receive queues before checking for pio buffers
+ * available since receives can overflow; piobuf waiters can afford
+ * a few extra cycles, since they were waiting anyway.
+ */
+ ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
+ if (ctxtrbits) {
+ rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
+ (1ULL << QIB_I_RCVURG_LSB);
+ for (i = 0; i < dd->first_user_ctxt; i++) {
+ if (ctxtrbits & rmask) {
+ ctxtrbits &= ~rmask;
+ if (dd->rcd[i]) {
+ qib_kreceive(dd->rcd[i], NULL, &npkts);
+ adjust_rcv_timeout(dd->rcd[i], npkts);
+ }
+ }
+ rmask <<= 1;
+ }
+ if (ctxtrbits) {
+ ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
+ (ctxtrbits >> QIB_I_RCVURG_LSB);
+ qib_handle_urcv(dd, ctxtrbits);
+ }
+ }
+
+ if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
+ sdma_7322_intr(dd, istat);
+
+ if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
+ qib_ib_piobufavail(dd);
+
+ ret = IRQ_HANDLED;
+bail:
+ return ret;
+}
+
+/*
+ * Dedicated receive packet available interrupt handler.
+ */
+static irqreturn_t qib_7322pintr(int irq, void *data)
+{
+ struct qib_ctxtdata *rcd = data;
+ struct qib_devdata *dd = rcd->dd;
+ u32 npkts;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
+ (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
+
+ qib_kreceive(rcd, NULL, &npkts);
+ adjust_rcv_timeout(rcd, npkts);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send buffer available interrupt handler.
+ */
+static irqreturn_t qib_7322bufavail(int irq, void *data)
+{
+ struct qib_devdata *dd = data;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
+
+ /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
+ if (dd->flags & QIB_INITTED)
+ qib_ib_piobufavail(dd);
+ else
+ qib_wantpiobuf_7322_intr(dd, 0);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send DMA interrupt handler.
+ */
+static irqreturn_t sdma_intr(int irq, void *data)
+{
+ struct qib_pportdata *ppd = data;
+ struct qib_devdata *dd = ppd->dd;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
+ INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
+ qib_sdma_intr(ppd);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send DMA idle interrupt handler.
+ */
+static irqreturn_t sdma_idle_intr(int irq, void *data)
+{
+ struct qib_pportdata *ppd = data;
+ struct qib_devdata *dd = ppd->dd;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
+ INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
+ qib_sdma_intr(ppd);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send DMA progress interrupt handler.
+ */
+static irqreturn_t sdma_progress_intr(int irq, void *data)
+{
+ struct qib_pportdata *ppd = data;
+ struct qib_devdata *dd = ppd->dd;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
+ INT_MASK_P(SDmaProgress, 1) :
+ INT_MASK_P(SDmaProgress, 0));
+ qib_sdma_intr(ppd);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send DMA cleanup interrupt handler.
+ */
+static irqreturn_t sdma_cleanup_intr(int irq, void *data)
+{
+ struct qib_pportdata *ppd = data;
+ struct qib_devdata *dd = ppd->dd;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
+ INT_MASK_PM(SDmaCleanupDone, 1) :
+ INT_MASK_PM(SDmaCleanupDone, 0));
+ qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Set up our chip-specific interrupt handler.
+ * The interrupt type has already been setup, so
+ * we just need to do the registration and error checking.
+ * If we are using MSIx interrupts, we may fall back to
+ * INTx later, if the interrupt handler doesn't get called
+ * within 1/2 second (see verify_interrupt()).
+ */
+static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
+{
+ int ret, i, msixnum;
+ u64 redirect[6];
+ u64 mask;
+
+ if (!dd->num_pports)
+ return;
+
+ if (clearpend) {
+ /*
+ * if not switching interrupt types, be sure interrupts are
+ * disabled, and then clear anything pending at this point,
+ * because we are starting clean.
+ */
+ qib_7322_set_intr_state(dd, 0);
+
+ /* clear the reset error, init error/hwerror mask */
+ qib_7322_init_hwerrors(dd);
+
+ /* clear any interrupt bits that might be set */
+ qib_write_kreg(dd, kr_intclear, ~0ULL);
+
+ /* make sure no pending MSIx intr, and clear diag reg */
+ qib_write_kreg(dd, kr_intgranted, ~0ULL);
+ qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
+ }
+
+ if (!dd->cspec->num_msix_entries) {
+ /* Try to get INTx interrupt */
+try_intx:
+ if (!dd->pcidev->irq) {
+ qib_dev_err(dd, "irq is 0, BIOS error? "
+ "Interrupts won't work\n");
+ goto bail;
+ }
+ ret = request_irq(dd->pcidev->irq, qib_7322intr,
+ IRQF_SHARED, QIB_DRV_NAME, dd);
+ if (ret) {
+ qib_dev_err(dd, "Couldn't setup INTx "
+ "interrupt (irq=%d): %d\n",
+ dd->pcidev->irq, ret);
+ goto bail;
+ }
+ dd->cspec->irq = dd->pcidev->irq;
+ dd->cspec->main_int_mask = ~0ULL;
+ goto bail;
+ }
+
+ /* Try to get MSIx interrupts */
+ memset(redirect, 0, sizeof redirect);
+ mask = ~0ULL;
+ msixnum = 0;
+ for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
+ irq_handler_t handler;
+ const char *name;
+ void *arg;
+ u64 val;
+ int lsb, reg, sh;
+
+ if (i < ARRAY_SIZE(irq_table)) {
+ if (irq_table[i].port) {
+ /* skip if for a non-configured port */
+ if (irq_table[i].port > dd->num_pports)
+ continue;
+ arg = dd->pport + irq_table[i].port - 1;
+ } else
+ arg = dd;
+ lsb = irq_table[i].lsb;
+ handler = irq_table[i].handler;
+ name = irq_table[i].name;
+ } else {
+ unsigned ctxt;
+
+ ctxt = i - ARRAY_SIZE(irq_table);
+ /* per krcvq context receive interrupt */
+ arg = dd->rcd[ctxt];
+ if (!arg)
+ continue;
+ lsb = QIB_I_RCVAVAIL_LSB + ctxt;
+ handler = qib_7322pintr;
+ name = QIB_DRV_NAME " (kctx)";
+ }
+ ret = request_irq(dd->cspec->msix_entries[msixnum].vector,
+ handler, 0, name, arg);
+ if (ret) {
+ /*
+ * Shouldn't happen since the enable said we could
+ * have as many as we are trying to setup here.
+ */
+ qib_dev_err(dd, "Couldn't setup MSIx "
+ "interrupt (vec=%d, irq=%d): %d\n", msixnum,
+ dd->cspec->msix_entries[msixnum].vector,
+ ret);
+ qib_7322_nomsix(dd);
+ goto try_intx;
+ }
+ dd->cspec->msix_arg[msixnum] = arg;
+ if (lsb >= 0) {
+ reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
+ sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
+ SYM_LSB(IntRedirect0, vec1);
+ mask &= ~(1ULL << lsb);
+ redirect[reg] |= ((u64) msixnum) << sh;
+ }
+ val = qib_read_kreg64(dd, 2 * msixnum + 1 +
+ (QIB_7322_MsixTable_OFFS / sizeof(u64)));
+ msixnum++;
+ }
+ /* Initialize the vector mapping */
+ for (i = 0; i < ARRAY_SIZE(redirect); i++)
+ qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
+ dd->cspec->main_int_mask = mask;
+bail:;
+}
+
+/**
+ * qib_7322_boardname - fill in the board name and note features
+ * @dd: the qlogic_ib device
+ *
+ * info will be based on the board revision register
+ */
+static unsigned qib_7322_boardname(struct qib_devdata *dd)
+{
+ /* Will need enumeration of board-types here */
+ char *n;
+ u32 boardid, namelen;
+ unsigned features = DUAL_PORT_CAP;
+
+ boardid = SYM_FIELD(dd->revision, Revision, BoardID);
+
+ switch (boardid) {
+ case 0:
+ n = "InfiniPath_QLE7342_Emulation";
+ break;
+ case 1:
+ n = "InfiniPath_QLE7340";
+ dd->flags |= QIB_HAS_QSFP;
+ features = PORT_SPD_CAP;
+ break;
+ case 2:
+ n = "InfiniPath_QLE7342";
+ dd->flags |= QIB_HAS_QSFP;
+ break;
+ case 3:
+ n = "InfiniPath_QMI7342";
+ break;
+ case 4:
+ n = "InfiniPath_Unsupported7342";
+ qib_dev_err(dd, "Unsupported version of QMH7342\n");
+ features = 0;
+ break;
+ case BOARD_QMH7342:
+ n = "InfiniPath_QMH7342";
+ features = 0x24;
+ break;
+ case BOARD_QME7342:
+ n = "InfiniPath_QME7342";
+ break;
+ case 15:
+ n = "InfiniPath_QLE7342_TEST";
+ dd->flags |= QIB_HAS_QSFP;
+ break;
+ default:
+ n = "InfiniPath_QLE73xy_UNKNOWN";
+ qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
+ break;
+ }
+ dd->board_atten = 1; /* index into txdds_Xdr */
+
+ namelen = strlen(n) + 1;
+ dd->boardname = kmalloc(namelen, GFP_KERNEL);
+ if (!dd->boardname)
+ qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
+ else
+ snprintf(dd->boardname, namelen, "%s", n);
+
+ snprintf(dd->boardversion, sizeof(dd->boardversion),
+ "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
+ QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
+ dd->majrev, dd->minrev,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
+
+ if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
+ qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode"
+ " by module parameter\n", dd->unit);
+ features &= PORT_SPD_CAP;
+ }
+
+ return features;
+}
+
+/*
+ * This routine sleeps, so it can only be called from user context, not
+ * from interrupt context.
+ */
+static int qib_do_7322_reset(struct qib_devdata *dd)
+{
+ u64 val;
+ u64 *msix_vecsave;
+ int i, msix_entries, ret = 1;
+ u16 cmdval;
+ u8 int_line, clinesz;
+ unsigned long flags;
+
+ /* Use dev_err so it shows up in logs, etc. */
+ qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
+
+ qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
+
+ msix_entries = dd->cspec->num_msix_entries;
+
+ /* no interrupts till re-initted */
+ qib_7322_set_intr_state(dd, 0);
+
+ if (msix_entries) {
+ qib_7322_nomsix(dd);
+ /* can be up to 512 bytes, too big for stack */
+ msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
+ sizeof(u64), GFP_KERNEL);
+ if (!msix_vecsave)
+ qib_dev_err(dd, "No mem to save MSIx data\n");
+ } else
+ msix_vecsave = NULL;
+
+ /*
+ * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
+ * info that is set up by the BIOS, so we have to save and restore
+ * it ourselves. There is some risk something could change it,
+ * after we save it, but since we have disabled the MSIx, it
+ * shouldn't be touched...
+ */
+ for (i = 0; i < msix_entries; i++) {
+ u64 vecaddr, vecdata;
+ vecaddr = qib_read_kreg64(dd, 2 * i +
+ (QIB_7322_MsixTable_OFFS / sizeof(u64)));
+ vecdata = qib_read_kreg64(dd, 1 + 2 * i +
+ (QIB_7322_MsixTable_OFFS / sizeof(u64)));
+ if (msix_vecsave) {
+ msix_vecsave[2 * i] = vecaddr;
+ /* save it without the masked bit set */
+ msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
+ }
+ }
+
+ dd->pport->cpspec->ibdeltainprog = 0;
+ dd->pport->cpspec->ibsymdelta = 0;
+ dd->pport->cpspec->iblnkerrdelta = 0;
+ dd->pport->cpspec->ibmalfdelta = 0;
+ dd->int_counter = 0; /* so we check interrupts work again */
+
+ /*
+ * Keep chip from being accessed until we are ready. Use
+ * writeq() directly, to allow the write even though QIB_PRESENT
+ * isn't set.
+ */
+ dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
+ dd->flags |= QIB_DOING_RESET;
+ val = dd->control | QLOGIC_IB_C_RESET;
+ writeq(val, &dd->kregbase[kr_control]);
+
+ for (i = 1; i <= 5; i++) {
+ /*
+ * Allow MBIST, etc. to complete; longer on each retry.
+ * We sometimes get machine checks from bus timeout if no
+ * response, so for now, make it *really* long.
+ */
+ msleep(1000 + (1 + i) * 3000);
+
+ qib_pcie_reenable(dd, cmdval, int_line, clinesz);
+
+ /*
+ * Use readq directly, so we don't need to mark it as PRESENT
+ * until we get a successful indication that all is well.
+ */
+ val = readq(&dd->kregbase[kr_revision]);
+ if (val == dd->revision)
+ break;
+ if (i == 5) {
+ qib_dev_err(dd, "Failed to initialize after reset, "
+ "unusable\n");
+ ret = 0;
+ goto bail;
+ }
+ }
+
+ dd->flags |= QIB_PRESENT; /* it's back */
+
+ if (msix_entries) {
+ /* restore the MSIx vector address and data if saved above */
+ for (i = 0; i < msix_entries; i++) {
+ dd->cspec->msix_entries[i].entry = i;
+ if (!msix_vecsave || !msix_vecsave[2 * i])
+ continue;
+ qib_write_kreg(dd, 2 * i +
+ (QIB_7322_MsixTable_OFFS / sizeof(u64)),
+ msix_vecsave[2 * i]);
+ qib_write_kreg(dd, 1 + 2 * i +
+ (QIB_7322_MsixTable_OFFS / sizeof(u64)),
+ msix_vecsave[1 + 2 * i]);
+ }
+ }
+
+ /* initialize the remaining registers. */
+ for (i = 0; i < dd->num_pports; ++i)
+ write_7322_init_portregs(&dd->pport[i]);
+ write_7322_initregs(dd);
+
+ if (qib_pcie_params(dd, dd->lbus_width,
+ &dd->cspec->num_msix_entries,
+ dd->cspec->msix_entries))
+ qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; "
+ "continuing anyway\n");
+
+ qib_setup_7322_interrupt(dd, 1);
+
+ for (i = 0; i < dd->num_pports; ++i) {
+ struct qib_pportdata *ppd = &dd->pport[i];
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+ ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
+
+bail:
+ dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
+ kfree(msix_vecsave);
+ return ret;
+}
+
+/**
+ * qib_7322_put_tid - write a TID to the chip
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: 0 for eager, 1 for expected
+ * @pa: physical address of in memory buffer; tidinvalid if freeing
+ */
+static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
+ u32 type, unsigned long pa)
+{
+ if (!(dd->flags & QIB_PRESENT))
+ return;
+ if (pa != dd->tidinvalid) {
+ u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
+
+ /* paranoia checks */
+ if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
+ qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+ pa);
+ return;
+ }
+ if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
+ qib_dev_err(dd, "Physical page address 0x%lx "
+ "larger than supported\n", pa);
+ return;
+ }
+
+ if (type == RCVHQ_RCV_TYPE_EAGER)
+ chippa |= dd->tidtemplate;
+ else /* for now, always full 4KB page */
+ chippa |= IBA7322_TID_SZ_4K;
+ pa = chippa;
+ }
+ writeq(pa, tidptr);
+ mmiowb();
+}
+
+/**
+ * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
+ * @dd: the qlogic_ib device
+ * @rcd: the context
+ *
+ * clear all TID entries for a ctxt, expected and eager.
+ * Used from qib_close().
+ */
+static void qib_7322_clear_tids(struct qib_devdata *dd,
+ struct qib_ctxtdata *rcd)
+{
+ u64 __iomem *tidbase;
+ unsigned long tidinv;
+ u32 ctxt;
+ int i;
+
+ if (!dd->kregbase || !rcd)
+ return;
+
+ ctxt = rcd->ctxt;
+
+ tidinv = dd->tidinvalid;
+ tidbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase +
+ dd->rcvtidbase +
+ ctxt * dd->rcvtidcnt * sizeof(*tidbase));
+
+ for (i = 0; i < dd->rcvtidcnt; i++)
+ qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+ tidinv);
+
+ tidbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase +
+ dd->rcvegrbase +
+ rcd->rcvegr_tid_base * sizeof(*tidbase));
+
+ for (i = 0; i < rcd->rcvegrcnt; i++)
+ qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
+ tidinv);
+}
+
+/**
+ * qib_7322_tidtemplate - setup constants for TID updates
+ * @dd: the qlogic_ib device
+ *
+ * We set up values that we use a lot, to avoid recalculating them each time.
+ */
+static void qib_7322_tidtemplate(struct qib_devdata *dd)
+{
+ /*
+ * For now, we always allocate 4KB buffers (at init) so we can
+ * receive max size packets. We may want a module parameter to
+ * specify 2KB or 4KB and/or make it per port instead of per device
+ * for those who want to reduce memory footprint. Note that the
+ * rcvhdrentsize size must be large enough to hold the largest
+ * IB header (currently 96 bytes) that we expect to handle (plus of
+ * course the 2 dwords of RHF).
+ */
+ if (dd->rcvegrbufsize == 2048)
+ dd->tidtemplate = IBA7322_TID_SZ_2K;
+ else if (dd->rcvegrbufsize == 4096)
+ dd->tidtemplate = IBA7322_TID_SZ_4K;
+ dd->tidinvalid = 0;
+}
+
+/**
+ * qib_init_7322_get_base_info - set chip-specific flags for user code
+ * @rcd: the qlogic_ib ctxt
+ * @kinfo: qib_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */
+
+static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
+ struct qib_base_info *kinfo)
+{
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
+ QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
+ QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
+ if (rcd->dd->cspec->r1)
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
+ if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
+
+ return 0;
+}
+
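+/*
+ * Return a pointer to the message header for the packet whose receive
+ * header flags (RHF) word is at rhf_addr.
+ */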
+static struct qib_message_header *
+qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
+{
+ u32 offset = qib_hdrget_offset(rhf_addr);
+
+ return (struct qib_message_header *)
+ (rhf_addr - dd->rhf_offset + offset);
+}
+
+/*
+ * Configure number of contexts.
+ */
+static void qib_7322_config_ctxts(struct qib_devdata *dd)
+{
+ unsigned long flags;
+ u32 nchipctxts;
+
+ nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
+ dd->cspec->numctxts = nchipctxts;
+ if (qib_n_krcv_queues > 1 && dd->num_pports) {
+ /*
+ * Set the mask for which bits from the QPN are used
+ * to select a context number.
+ */
+ dd->qpn_mask = 0x3f;
+ dd->first_user_ctxt = NUM_IB_PORTS +
+ (qib_n_krcv_queues - 1) * dd->num_pports;
+ if (dd->first_user_ctxt > nchipctxts)
+ dd->first_user_ctxt = nchipctxts;
+ dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
+ } else {
+ dd->first_user_ctxt = NUM_IB_PORTS;
+ dd->n_krcv_queues = 1;
+ }
+
+ if (!qib_cfgctxts) {
+ int nctxts = dd->first_user_ctxt + num_online_cpus();
+
+ if (nctxts <= 6)
+ dd->ctxtcnt = 6;
+ else if (nctxts <= 10)
+ dd->ctxtcnt = 10;
+ else if (nctxts <= nchipctxts)
+ dd->ctxtcnt = nchipctxts;
+ } else if (qib_cfgctxts < dd->num_pports)
+ dd->ctxtcnt = dd->num_pports;
+ else if (qib_cfgctxts <= nchipctxts)
+ dd->ctxtcnt = qib_cfgctxts;
+ if (!dd->ctxtcnt) /* none of the above, set to max */
+ dd->ctxtcnt = nchipctxts;
+
+ /*
+ * Chip can be configured for 6, 10, or 18 ctxts, and choice
+ * affects number of eager TIDs per ctxt (1K, 2K, 4K).
+ * Lock to be paranoid about later motion, etc.
+ */
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+ if (dd->ctxtcnt > 10)
+ dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
+ else if (dd->ctxtcnt > 6)
+ dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
+ /* else configure for default 6 receive ctxts */
+
+ /* The XRC opcode is 5. */
+ dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
+
+ /*
+ * RcvCtrl *must* be written here so that the
+ * chip understands how to change rcvegrcnt below.
+ */
+ qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+ /* kr_rcvegrcnt changes based on the number of contexts enabled */
+ dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
+ dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
+ dd->num_pports > 1 ? 1024U : 2048U);
+}
+
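+/*
+ * Get the current value of an IB configuration item for the port.
+ * Some items come straight from the port shadow state; the rest are
+ * extracted from the ibcctrl_b shadow register.
+ */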
+static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
+{
+
+ int lsb, ret = 0;
+ u64 maskr; /* right-justified mask */
+
+ switch (which) {
+
+ case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
+ ret = ppd->link_width_enabled;
+ goto done;
+
+ case QIB_IB_CFG_LWID: /* Get currently active Link-width */
+ ret = ppd->link_width_active;
+ goto done;
+
+ case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
+ ret = ppd->link_speed_enabled;
+ goto done;
+
+ case QIB_IB_CFG_SPD: /* Get current Link spd */
+ ret = ppd->link_speed_active;
+ goto done;
+
+ case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
+ lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+ maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+ break;
+
+ case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
+ lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+ maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+ break;
+
+ case QIB_IB_CFG_LINKLATENCY:
+ ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
+ SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
+ goto done;
+
+ case QIB_IB_CFG_OP_VLS:
+ ret = ppd->vls_operational;
+ goto done;
+
+ case QIB_IB_CFG_VL_HIGH_CAP:
+ ret = 16;
+ goto done;
+
+ case QIB_IB_CFG_VL_LOW_CAP:
+ ret = 16;
+ goto done;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+ OverrunThreshold);
+ goto done;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+ PhyerrThreshold);
+ goto done;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ ret = (ppd->cpspec->ibcctrl_a &
+ SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
+ IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
+ goto done;
+
+ case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
+ lsb = IBA7322_IBC_HRTBT_LSB;
+ maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
+ break;
+
+ case QIB_IB_CFG_PMA_TICKS:
+ /*
+ * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
+ * Since the clock is always 250MHz, the value is 3, 1 or 0.
+ */
+ if (ppd->link_speed_active == QIB_IB_QDR)
+ ret = 3;
+ else if (ppd->link_speed_active == QIB_IB_DDR)
+ ret = 1;
+ else
+ ret = 0;
+ goto done;
+
+ default:
+ ret = -EINVAL;
+ goto done;
+ }
+ ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
+done:
+ return ret;
+}
+
+/*
+ * Below again cribbed liberally from older version. Do not lean
+ * heavily on it.
+ */
+#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
+#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
+ | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
+
+static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 maskr; /* right-justified mask */
+ int lsb, ret = 0;
+ u16 lcmd, licmd;
+ unsigned long flags;
+
+ switch (which) {
+ case QIB_IB_CFG_LIDLMC:
+ /*
+ * Set LID and LMC. Combined to avoid a possible hazard;
+ * the caller puts the LMC in the 16 MSbits and the DLID in
+ * the 16 LSbits of val.
+ */
+ lsb = IBA7322_IBC_DLIDLMC_SHIFT;
+ maskr = IBA7322_IBC_DLIDLMC_MASK;
+ /*
+ * For header-checking, the SLID in the packet will
+ * be masked with SendIBSLMCMask, and compared
+ * with SendIBSLIDAssignMask. Make sure we do not
+ * set any bits not covered by the mask, or we get
+ * false-positives.
+ */
+ qib_write_kreg_port(ppd, krp_sendslid,
+ val & (val >> 16) & SendIBSLIDAssignMask);
+ qib_write_kreg_port(ppd, krp_sendslidmask,
+ (val >> 16) & SendIBSLMCMask);
+ break;
+
+ case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
+ ppd->link_width_enabled = val;
+ /* convert IB value to chip register value */
+ if (val == IB_WIDTH_1X)
+ val = 0;
+ else if (val == IB_WIDTH_4X)
+ val = 1;
+ else
+ val = 3;
+ maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
+ lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
+ break;
+
+ case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
+ /*
+ * As with width, only write the actual register if the
+ * link is currently down, otherwise takes effect on next
+ * link change. Since the setting is being explicitly requested
+ * (via MAD or sysfs), clear autoneg failure status if speed
+ * autoneg is enabled.
+ */
+ ppd->link_speed_enabled = val;
+ val <<= IBA7322_IBC_SPEED_LSB;
+ maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
+ IBA7322_IBC_MAX_SPEED_MASK;
+ if (val & (val - 1)) {
+ /* Multiple speeds enabled */
+ val |= IBA7322_IBC_IBTA_1_2_MASK |
+ IBA7322_IBC_MAX_SPEED_MASK;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ } else if (val & IBA7322_IBC_SPEED_QDR)
+ val |= IBA7322_IBC_IBTA_1_2_MASK;
+ /* IBTA 1.2 mode + min/max + speed bits are contiguous */
+ lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
+ break;
+
+ case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
+ lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+ maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+ break;
+
+ case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
+ lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+ maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+ break;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+ OverrunThreshold);
+ if (maskr != val) {
+ ppd->cpspec->ibcctrl_a &=
+ ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
+ ppd->cpspec->ibcctrl_a |= (u64) val <<
+ SYM_LSB(IBCCtrlA_0, OverrunThreshold);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ }
+ goto bail;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+ PhyerrThreshold);
+ if (maskr != val) {
+ ppd->cpspec->ibcctrl_a &=
+ ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
+ ppd->cpspec->ibcctrl_a |= (u64) val <<
+ SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ }
+ goto bail;
+
+ case QIB_IB_CFG_PKEYS: /* update pkeys */
+ maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
+ ((u64) ppd->pkeys[2] << 32) |
+ ((u64) ppd->pkeys[3] << 48);
+ qib_write_kreg_port(ppd, krp_partitionkey, maskr);
+ goto bail;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ if (val == IB_LINKINITCMD_POLL)
+ ppd->cpspec->ibcctrl_a &=
+ ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
+ else /* SLEEP */
+ ppd->cpspec->ibcctrl_a |=
+ SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ goto bail;
+
+ case QIB_IB_CFG_MTU: /* update the MTU in IBC */
+ /*
+ * Update our housekeeping variables, and set IBC max
+ * size, same as init code; max IBC is max we allow in
+ * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
+ * Set even if it's unchanged; print a debug message only
+ * on changes.
+ */
+ val = (ppd->ibmaxlen >> 2) + 1;
+ ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
+ ppd->cpspec->ibcctrl_a |= (u64)val <<
+ SYM_LSB(IBCCtrlA_0, MaxPktLen);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ goto bail;
+
+ case QIB_IB_CFG_LSTATE: /* set the IB link state */
+ switch (val & 0xffff0000) {
+ case IB_LINKCMD_DOWN:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
+ ppd->cpspec->ibmalfusesnap = 1;
+ ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
+ crp_errlink);
+ if (!ppd->cpspec->ibdeltainprog &&
+ qib_compat_ddr_negotiate) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap =
+ read_7322_creg32_port(ppd,
+ crp_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap =
+ read_7322_creg32_port(ppd,
+ crp_iblinkerrrecov);
+ }
+ break;
+
+ case IB_LINKCMD_ARMED:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
+ if (ppd->cpspec->ibmalfusesnap) {
+ ppd->cpspec->ibmalfusesnap = 0;
+ ppd->cpspec->ibmalfdelta +=
+ read_7322_creg32_port(ppd,
+ crp_errlink) -
+ ppd->cpspec->ibmalfsnap;
+ }
+ break;
+
+ case IB_LINKCMD_ACTIVE:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
+ goto bail;
+ }
+ switch (val & 0xffff) {
+ case IB_LINKINITCMD_NOP:
+ licmd = 0;
+ break;
+
+ case IB_LINKINITCMD_POLL:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
+ break;
+
+ case IB_LINKINITCMD_SLEEP:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
+ break;
+
+ case IB_LINKINITCMD_DISABLE:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
+ ppd->cpspec->chase_end = 0;
+ /*
+ * Stop the state chase counter and timer, if running;
+ * wait for the pending timer, but don't clear .data (ppd)!
+ */
+ if (ppd->cpspec->chase_timer.expires) {
+ del_timer_sync(&ppd->cpspec->chase_timer);
+ ppd->cpspec->chase_timer.expires = 0;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
+ val & 0xffff);
+ goto bail;
+ }
+ qib_set_ib_7322_lstate(ppd, lcmd, licmd);
+ goto bail;
+
+ case QIB_IB_CFG_OP_VLS:
+ if (ppd->vls_operational != val) {
+ ppd->vls_operational = val;
+ set_vls(ppd);
+ }
+ goto bail;
+
+ case QIB_IB_CFG_VL_HIGH_LIMIT:
+ qib_write_kreg_port(ppd, krp_highprio_limit, val);
+ goto bail;
+
+ case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
+ if (val > 3) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ lsb = IBA7322_IBC_HRTBT_LSB;
+ maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
+ break;
+
+ case QIB_IB_CFG_PORT:
+ /* val is the port number of the switch we are connected to. */
+ if (ppd->dd->cspec->r1) {
+ cancel_delayed_work(&ppd->cpspec->ipg_work);
+ ppd->cpspec->ipg_tries = 0;
+ }
+ goto bail;
+
+ default:
+ ret = -EINVAL;
+ goto bail;
+ }
+ ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
+ ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
+ qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
+ qib_write_kreg(dd, kr_scratch, 0);
+bail:
+ return ret;
+}
+
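+/*
+ * Enable or disable IBC loopback on the port: "ibc" turns loopback on
+ * and disables heartbeat so the link can come up; "off" restores
+ * normal (non-loopback) operation.
+ */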
+static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
+{
+ int ret = 0;
+ u64 val, ctrlb;
+
+ /* only IBC loopback, may add serdes and xgxs loopbacks later */
+ if (!strncmp(what, "ibc", 3)) {
+ ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
+ Loopback);
+ val = 0; /* disable heart beat, so link will come up */
+ qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
+ ppd->dd->unit, ppd->port);
+ } else if (!strncmp(what, "off", 3)) {
+ ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
+ Loopback);
+ /* enable heart beat again */
+ val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
+ qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
+ "(normal)\n", ppd->dd->unit, ppd->port);
+ } else
+ ret = -EINVAL;
+ if (!ret) {
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
+ << IBA7322_IBC_HRTBT_LSB);
+ ppd->cpspec->ibcctrl_b = ctrlb | val;
+ qib_write_kreg_port(ppd, krp_ibcctrl_b,
+ ppd->cpspec->ibcctrl_b);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+ }
+ return ret;
+}
+
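+/*
+ * Read 16 VL arbitration entries (virtual lane and weight) from
+ * consecutive per-port registers starting at regno.
+ */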
+static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
+ struct ib_vl_weight_elem *vl)
+{
+ unsigned i;
+
+ for (i = 0; i < 16; i++, regno++, vl++) {
+ u32 val = qib_read_kreg_port(ppd, regno);
+
+ vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
+ SYM_RMASK(LowPriority0_0, VirtualLane);
+ vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
+ SYM_RMASK(LowPriority0_0, Weight);
+ }
+}
+
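+/*
+ * Write 16 VL arbitration entries to consecutive per-port registers
+ * starting at regno, then enable the IB VL arbiter if it is not
+ * already enabled.
+ */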
+static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
+ struct ib_vl_weight_elem *vl)
+{
+ unsigned i;
+
+ for (i = 0; i < 16; i++, regno++, vl++) {
+ u64 val;
+
+ val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
+ SYM_LSB(LowPriority0_0, VirtualLane)) |
+ ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
+ SYM_LSB(LowPriority0_0, Weight));
+ qib_write_kreg_port(ppd, regno, val);
+ }
+ if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
+ qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ }
+}
+
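+/* Copy the requested VL arbitration table (high or low priority) to t. */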
+static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
+{
+ switch (which) {
+ case QIB_IB_TBL_VL_HIGH_ARB:
+ get_vl_weights(ppd, krp_highprio_0, t);
+ break;
+
+ case QIB_IB_TBL_VL_LOW_ARB:
+ get_vl_weights(ppd, krp_lowprio_0, t);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
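+/* Write the requested VL arbitration table (high or low priority) from t. */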
+static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
+{
+ switch (which) {
+ case QIB_IB_TBL_VL_HIGH_ARB:
+ set_vl_weights(ppd, krp_highprio_0, t);
+ break;
+
+ case QIB_IB_TBL_VL_LOW_ARB:
+ set_vl_weights(ppd, krp_lowprio_0, t);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
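+/*
+ * Update the receive header queue head register for a context, and
+ * optionally the eager buffer index head as well.
+ */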
+static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+ u32 updegr, u32 egrhd)
+{
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ if (updegr)
+ qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+}
+
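+/*
+ * Return true (nonzero) when the receive header queue for this context
+ * is empty, i.e. head equals tail.
+ */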
+static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
+{
+ u32 head, tail;
+
+ head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
+ if (rcd->rcvhdrtail_kvaddr)
+ tail = qib_get_rcvhdrtail(rcd);
+ else
+ tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
+ return head == tail;
+}
+
+#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
+ QIB_RCVCTRL_CTXT_DIS | \
+ QIB_RCVCTRL_TIDFLOW_ENB | \
+ QIB_RCVCTRL_TIDFLOW_DIS | \
+ QIB_RCVCTRL_TAILUPD_ENB | \
+ QIB_RCVCTRL_TAILUPD_DIS | \
+ QIB_RCVCTRL_INTRAVAIL_ENB | \
+ QIB_RCVCTRL_INTRAVAIL_DIS | \
+ QIB_RCVCTRL_BP_ENB | \
+ QIB_RCVCTRL_BP_DIS)
+
+#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
+ QIB_RCVCTRL_CTXT_DIS | \
+ QIB_RCVCTRL_PKEY_DIS | \
+ QIB_RCVCTRL_PKEY_ENB)
+
+/*
+ * Modify the RCVCTRL register in a chip-specific way. This
+ * is a function because bit positions and (future) register
+ * locations are chip-specific, but the needed operations are
+ * generic. <op> is a bit-mask because we often want to
+ * do multiple modifications.
+ */
+static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
+ int ctxt)
+{
+ struct qib_devdata *dd = ppd->dd;
+ struct qib_ctxtdata *rcd;
+ u64 mask, val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+
+ if (op & QIB_RCVCTRL_TIDFLOW_ENB)
+ dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
+ if (op & QIB_RCVCTRL_TIDFLOW_DIS)
+ dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
+ if (op & QIB_RCVCTRL_TAILUPD_ENB)
+ dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
+ if (op & QIB_RCVCTRL_TAILUPD_DIS)
+ dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
+ if (op & QIB_RCVCTRL_PKEY_ENB)
+ ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
+ if (op & QIB_RCVCTRL_PKEY_DIS)
+ ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
+ if (ctxt < 0) {
+ mask = (1ULL << dd->ctxtcnt) - 1;
+ rcd = NULL;
+ } else {
+ mask = (1ULL << ctxt);
+ rcd = dd->rcd[ctxt];
+ }
+ if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
+ ppd->p_rcvctrl |=
+ (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
+ if (!(dd->flags & QIB_NODMA_RTAIL)) {
+ op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
+ dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
+ }
+ /* Write these registers before the context is enabled. */
+ qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
+ rcd->rcvhdrqtailaddr_phys);
+ qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
+ rcd->rcvhdrq_phys);
+ rcd->seq_cnt = 1;
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS)
+ ppd->p_rcvctrl &=
+ ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
+ if (op & QIB_RCVCTRL_BP_ENB)
+ dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
+ if (op & QIB_RCVCTRL_BP_DIS)
+ dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
+ if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
+ dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
+ if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
+ dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
+ /*
+ * Decide which registers to write depending on the ops enabled.
+ * Special case is "flush" (no bits set at all)
+ * which needs to write both.
+ */
+ if (op == 0 || (op & RCVCTRL_COMMON_MODS))
+ qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+ if (op == 0 || (op & RCVCTRL_PORT_MODS))
+ qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
+ if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
+ /*
+ * Init the context registers also; if we were
+ * disabled, tail and head should both be zero
+ * already from the enable, but since we don't
+ * know, we have to do it explicitly.
+ */
+ val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
+ qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
+
+ /* be sure enabling write seen; hd/tl should be 0 */
+ (void) qib_read_kreg32(dd, kr_scratch);
+ val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
+ dd->rcd[ctxt]->head = val;
+ /* If kctxt, interrupt on next receive. */
+ if (ctxt < dd->first_user_ctxt)
+ val |= dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
+ dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
+ /* arm rcv interrupt */
+ val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS) {
+ unsigned f;
+
+ /* Now that the context is disabled, clear these registers. */
+ if (ctxt >= 0) {
+ qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
+ qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
+ for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
+ qib_write_ureg(dd, ur_rcvflowtable + f,
+ TIDFLOW_ERRBITS, ctxt);
+ } else {
+ unsigned i;
+
+ for (i = 0; i < dd->cfgctxts; i++) {
+ qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
+ i, 0);
+ qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
+ for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
+ qib_write_ureg(dd, ur_rcvflowtable + f,
+ TIDFLOW_ERRBITS, i);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+}
+
+/*
+ * Modify the SENDCTRL register in a chip-specific way. This
+ * is a function because there are multiple such registers with
+ * slightly different layouts.
+ * The chip doesn't allow back-to-back sendctrl writes, so write
+ * the scratch register after writing sendctrl.
+ *
+ * Which register is written depends on the operation.
+ * Most operate on the common register, while
+ * SEND_ENB and SEND_DIS operate on the per-port ones.
+ * SEND_ENB is included in common because it can change SPCL_TRIG
+ */
+#define SENDCTRL_COMMON_MODS (\
+ QIB_SENDCTRL_CLEAR | \
+ QIB_SENDCTRL_AVAIL_DIS | \
+ QIB_SENDCTRL_AVAIL_ENB | \
+ QIB_SENDCTRL_AVAIL_BLIP | \
+ QIB_SENDCTRL_DISARM | \
+ QIB_SENDCTRL_DISARM_ALL | \
+ QIB_SENDCTRL_SEND_ENB)
+
+#define SENDCTRL_PORT_MODS (\
+ QIB_SENDCTRL_CLEAR | \
+ QIB_SENDCTRL_SEND_ENB | \
+ QIB_SENDCTRL_SEND_DIS | \
+ QIB_SENDCTRL_FLUSH)
+
+static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 tmp_dd_sendctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+
+ /* First the dd ones that are "sticky", saved in shadow */
+ if (op & QIB_SENDCTRL_CLEAR)
+ dd->sendctrl = 0;
+ if (op & QIB_SENDCTRL_AVAIL_DIS)
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+ else if (op & QIB_SENDCTRL_AVAIL_ENB) {
+ dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
+ if (dd->flags & QIB_USE_SPCL_TRIG)
+ dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
+ }
+
+ /* Then the ppd ones that are "sticky", saved in shadow */
+ if (op & QIB_SENDCTRL_SEND_DIS)
+ ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
+ else if (op & QIB_SENDCTRL_SEND_ENB)
+ ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
+
+ if (op & QIB_SENDCTRL_DISARM_ALL) {
+ u32 i, last;
+
+ tmp_dd_sendctrl = dd->sendctrl;
+ last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
+ /*
+ * Disarm any buffers that are not yet launched,
+ * disabling updates until done.
+ */
+ tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+ for (i = 0; i < last; i++) {
+ qib_write_kreg(dd, kr_sendctrl,
+ tmp_dd_sendctrl |
+ SYM_MASK(SendCtrl, Disarm) | i);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ }
+
+ if (op & QIB_SENDCTRL_FLUSH) {
+ u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
+
+ /*
+ * Now drain all the fifos. The Abort bit should never be
+ * needed, so for now, at least, we don't use it.
+ */
+ tmp_ppd_sendctrl |=
+ SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
+ SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
+ SYM_MASK(SendCtrl_0, TxeBypassIbc);
+ qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ tmp_dd_sendctrl = dd->sendctrl;
+
+ if (op & QIB_SENDCTRL_DISARM)
+ tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
+ ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
+ SYM_LSB(SendCtrl, DisarmSendBuf));
+ if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
+ (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
+ tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+
+ if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
+ qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
+ qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ if (op & QIB_SENDCTRL_AVAIL_BLIP) {
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+
+ if (op & QIB_SENDCTRL_FLUSH) {
+ u32 v;
+ /*
+ * ensure writes have hit chip, then do a few
+ * more reads, to allow DMA of pioavail registers
+ * to occur, so in-memory copy is in sync with
+ * the chip. Not always safe to sleep.
+ */
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ qib_read_kreg32(dd, kr_scratch);
+ }
+}
+
+#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
+#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
+#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
+
+/**
+ * qib_portcntr_7322 - read a per-port chip counter
+ * @ppd: the qlogic_ib pport
+ * @reg: the counter to read (not a chip offset)
+ */
+static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 ret = 0ULL;
+ u16 creg;
+ /* 0xffff for unimplemented or synthesized counters */
+ static const u32 xlator[] = {
+ [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
+ [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
+ [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
+ [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
+ [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
+ [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
+ [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
+ [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
+ [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
+ [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
+ [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
+ [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
+ [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
+ [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
+ [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
+ [QIBPORTCNTR_ERRICRC] = crp_erricrc,
+ [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
+ [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
+ [QIBPORTCNTR_BADFORMAT] = crp_badformat,
+ [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
+ [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
+ [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
+ [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
+ [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
+ [QIBPORTCNTR_ERRLINK] = crp_errlink,
+ [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
+ [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
+ [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
+ [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
+ [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
+ /*
+ * the next 3 aren't really counters, but were implemented
+ * as counters in older chips, so this code still accesses
+ * them as though they were counters.
+ */
+ [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
+ [QIBPORTCNTR_PSSTART] = krp_psstart,
+ [QIBPORTCNTR_PSSTAT] = krp_psstat,
+ /* pseudo-counter, summed for all ports */
+ [QIBPORTCNTR_KHDROVFL] = 0xffff,
+ };
+
+ if (reg >= ARRAY_SIZE(xlator)) {
+ qib_devinfo(ppd->dd->pcidev,
+ "Unimplemented portcounter %u\n", reg);
+ goto done;
+ }
+ creg = xlator[reg] & _PORT_CNTR_IDXMASK;
+
+ /* handle non-counters and special cases first */
+ if (reg == QIBPORTCNTR_KHDROVFL) {
+ int i;
+
+ /* sum over all kernel contexts (skip if mini_init) */
+ for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
+ struct qib_ctxtdata *rcd = dd->rcd[i];
+
+ if (!rcd || rcd->ppd != ppd)
+ continue;
+ ret += read_7322_creg32(dd, cr_base_egrovfl + i);
+ }
+ goto done;
+ } else if (reg == QIBPORTCNTR_RXDROPPKT) {
+ /*
+ * Used as part of the synthesis of port_rcv_errors
+ * in the verbs code for IBTA counters. Not needed for 7322,
+ * because all the errors are already counted by other cntrs.
+ */
+ goto done;
+ } else if (reg == QIBPORTCNTR_PSINTERVAL ||
+ reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
+ /* were counters in older chips, now per-port kernel regs */
+ ret = qib_read_kreg_port(ppd, creg);
+ goto done;
+ }
+
+ /*
+ * Only fast increment counters are 64 bits; use 32 bit reads to
+ * avoid two independent reads when on Opteron.
+ */
+ if (xlator[reg] & _PORT_64BIT_FLAG)
+ ret = read_7322_creg_port(ppd, creg);
+ else
+ ret = read_7322_creg32_port(ppd, creg);
+ if (creg == crp_ibsymbolerr) {
+ if (ppd->cpspec->ibdeltainprog)
+ ret -= ret - ppd->cpspec->ibsymsnap;
+ ret -= ppd->cpspec->ibsymdelta;
+ } else if (creg == crp_iblinkerrrecov) {
+ if (ppd->cpspec->ibdeltainprog)
+ ret -= ret - ppd->cpspec->iblnkerrsnap;
+ ret -= ppd->cpspec->iblnkerrdelta;
+ } else if (creg == crp_errlink)
+ ret -= ppd->cpspec->ibmalfdelta;
+ else if (creg == crp_iblinkdown)
+ ret += ppd->cpspec->iblnkdowndelta;
+done:
+ return ret;
+}
+
+/*
+ * Device counter names (not port-specific), one line per stat,
+ * single string. Used by utilities like ipathstats to print the stats
+ * in a way which works for different versions of drivers, without changing
+ * the utility. Names need to be 12 chars or less (w/o newline), for proper
+ * display by utility.
+ * Non-error counters are first.
+ * Start of "error" conters is indicated by a leading "E " on the first
+ * "error" counter, and doesn't count in label length.
+ * The EgrOvfl list needs to be last so we truncate them at the configured
+ * context count for the device.
+ * cntr7322indices contains the corresponding register indices.
+ */
+static const char cntr7322names[] =
+ "Interrupts\n"
+ "HostBusStall\n"
+ "E RxTIDFull\n"
+ "RxTIDInvalid\n"
+ "RxTIDFloDrop\n" /* 7322 only */
+ "Ctxt0EgrOvfl\n"
+ "Ctxt1EgrOvfl\n"
+ "Ctxt2EgrOvfl\n"
+ "Ctxt3EgrOvfl\n"
+ "Ctxt4EgrOvfl\n"
+ "Ctxt5EgrOvfl\n"
+ "Ctxt6EgrOvfl\n"
+ "Ctxt7EgrOvfl\n"
+ "Ctxt8EgrOvfl\n"
+ "Ctxt9EgrOvfl\n"
+ "Ctx10EgrOvfl\n"
+ "Ctx11EgrOvfl\n"
+ "Ctx12EgrOvfl\n"
+ "Ctx13EgrOvfl\n"
+ "Ctx14EgrOvfl\n"
+ "Ctx15EgrOvfl\n"
+ "Ctx16EgrOvfl\n"
+ "Ctx17EgrOvfl\n"
+ ;
+
+static const u32 cntr7322indices[] = {
+ cr_lbint | _PORT_64BIT_FLAG,
+ cr_lbstall | _PORT_64BIT_FLAG,
+ cr_tidfull,
+ cr_tidinvalid,
+ cr_rxtidflowdrop,
+ cr_base_egrovfl + 0,
+ cr_base_egrovfl + 1,
+ cr_base_egrovfl + 2,
+ cr_base_egrovfl + 3,
+ cr_base_egrovfl + 4,
+ cr_base_egrovfl + 5,
+ cr_base_egrovfl + 6,
+ cr_base_egrovfl + 7,
+ cr_base_egrovfl + 8,
+ cr_base_egrovfl + 9,
+ cr_base_egrovfl + 10,
+ cr_base_egrovfl + 11,
+ cr_base_egrovfl + 12,
+ cr_base_egrovfl + 13,
+ cr_base_egrovfl + 14,
+ cr_base_egrovfl + 15,
+ cr_base_egrovfl + 16,
+ cr_base_egrovfl + 17,
+};
+
+/*
+ * same as cntr7322names and cntr7322indices, but for port-specific counters.
+ * portcntr7322indices is somewhat complicated by some registers needing
+ * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
+ */
+static const char portcntr7322names[] =
+ "TxPkt\n"
+ "TxFlowPkt\n"
+ "TxWords\n"
+ "RxPkt\n"
+ "RxFlowPkt\n"
+ "RxWords\n"
+ "TxFlowStall\n"
+ "TxDmaDesc\n" /* 7220 and 7322-only */
+ "E RxDlidFltr\n" /* 7220 and 7322-only */
+ "IBStatusChng\n"
+ "IBLinkDown\n"
+ "IBLnkRecov\n"
+ "IBRxLinkErr\n"
+ "IBSymbolErr\n"
+ "RxLLIErr\n"
+ "RxBadFormat\n"
+ "RxBadLen\n"
+ "RxBufOvrfl\n"
+ "RxEBP\n"
+ "RxFlowCtlErr\n"
+ "RxICRCerr\n"
+ "RxLPCRCerr\n"
+ "RxVCRCerr\n"
+ "RxInvalLen\n"
+ "RxInvalPKey\n"
+ "RxPktDropped\n"
+ "TxBadLength\n"
+ "TxDropped\n"
+ "TxInvalLen\n"
+ "TxUnderrun\n"
+ "TxUnsupVL\n"
+ "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
+ "RxVL15Drop\n"
+ "RxVlErr\n"
+ "XcessBufOvfl\n"
+ "RxQPBadCtxt\n" /* 7322-only from here down */
+ "TXBadHeader\n"
+ ;
+
+static const u32 portcntr7322indices[] = {
+ QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
+ crp_pktsendflow,
+ QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
+ crp_pktrcvflowctrl,
+ QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
+ crp_txsdmadesc | _PORT_64BIT_FLAG,
+ crp_rxdlidfltr,
+ crp_ibstatuschange,
+ QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
+ crp_rcvflowctrlviol,
+ QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
+ crp_txminmaxlenerr,
+ crp_txdroppedpkt,
+ crp_txlenerr,
+ crp_txunderrun,
+ crp_txunsupvl,
+ QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
+ crp_rxqpinvalidctxt,
+ crp_txhdrerr,
+};
+
+/* do all the setup to make the counter reads efficient later */
+static void init_7322_cntrnames(struct qib_devdata *dd)
+{
+ int i, j = 0;
+ char *s;
+
+ for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
+ i++) {
+ /* we always have at least one counter before the egrovfl */
+ if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
+ j = 1;
+ s = strchr(s + 1, '\n');
+ if (s && j)
+ j++;
+ }
+ dd->cspec->ncntrs = i;
+ if (!s)
+ /* full list; size is without terminating null */
+ dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
+ else
+ dd->cspec->cntrnamelen = 1 + s - cntr7322names;
+ dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->cspec->cntrs)
+ qib_dev_err(dd, "Failed allocation for counters\n");
+
+ for (i = 0, s = (char *)portcntr7322names; s; i++)
+ s = strchr(s + 1, '\n');
+ dd->cspec->nportcntrs = i - 1;
+ dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
+ for (i = 0; i < dd->num_pports; ++i) {
+ dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->pport[i].cpspec->portcntrs)
+ qib_dev_err(dd, "Failed allocation for"
+ " portcounters\n");
+ }
+}
+
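+/*
+ * Return the device-wide counter names (namep non-NULL), or snapshot the
+ * current counter values into dd->cspec->cntrs; in either case the return
+ * value is the number of bytes available, or 0 once pos is past the end
+ * (or the counter array could not be allocated).
+ */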
+static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
+ u64 **cntrp)
+{
+ u32 ret;
+
+ if (namep) {
+ ret = dd->cspec->cntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ else
+ *namep = (char *) cntr7322names;
+ } else {
+ u64 *cntr = dd->cspec->cntrs;
+ int i;
+
+ ret = dd->cspec->ncntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->ncntrs; i++)
+ if (cntr7322indices[i] & _PORT_64BIT_FLAG)
+ *cntr++ = read_7322_creg(dd,
+ cntr7322indices[i] &
+ _PORT_CNTR_IDXMASK);
+ else
+ *cntr++ = read_7322_creg32(dd,
+ cntr7322indices[i]);
+ }
+done:
+ return ret;
+}
+
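+/*
+ * Same as qib_read_7322cntrs(), but for the counters of the given port;
+ * "virtual" counters are read via qib_portcntr_7322() rather than directly
+ * from the chip counter registers.
+ */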
+static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
+ char **namep, u64 **cntrp)
+{
+ u32 ret;
+
+ if (namep) {
+ ret = dd->cspec->portcntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ else
+ *namep = (char *)portcntr7322names;
+ } else {
+ struct qib_pportdata *ppd = &dd->pport[port];
+ u64 *cntr = ppd->cpspec->portcntrs;
+ int i;
+
+ ret = dd->cspec->nportcntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->nportcntrs; i++) {
+ if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
+ *cntr++ = qib_portcntr_7322(ppd,
+ portcntr7322indices[i] &
+ _PORT_CNTR_IDXMASK);
+ else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
+ *cntr++ = read_7322_creg_port(ppd,
+ portcntr7322indices[i] &
+ _PORT_CNTR_IDXMASK);
+ else
+ *cntr++ = read_7322_creg32_port(ppd,
+ portcntr7322indices[i]);
+ }
+ }
+done:
+ return ret;
+}
+
+/**
+ * qib_get_7322_faststats - get word counters from chip before they overflow
+ * @opaque: contains a pointer to the qlogic_ib device qib_devdata
+ *
+ * VESTIGIAL: the IBA7322 has no "small fast counters", so the only
+ * real purpose of this function is to maintain the notion of
+ * "active time", which in turn is only logged into the eeprom,
+ * which we don't have, yet, for 7322-based boards.
+ *
+ * called from add_timer
+ */
+static void qib_get_7322_faststats(unsigned long opaque)
+{
+ struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_pportdata *ppd;
+ unsigned long flags;
+ u64 traffic_wds;
+ int pidx;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ /*
+ * If the port isn't enabled or isn't operational, or if
+ * diags are running (which can cause memory diags to fail),
+ * skip this port this time.
+ */
+ if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
+ || dd->diag_client)
+ continue;
+
+ /*
+ * Maintain an activity timer, based on traffic exceeding a
+ * threshold; that means we need to check the word counts
+ * even though they are 64-bit.
+ */
+ traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
+ qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
+ spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
+ traffic_wds -= ppd->dd->traffic_wds;
+ ppd->dd->traffic_wds += traffic_wds;
+ if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+ atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
+ spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
+ if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
+ QIB_IB_QDR) &&
+ (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
+ QIBL_LINKACTIVE)) &&
+ ppd->cpspec->qdr_dfe_time &&
+ time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) {
+ ppd->cpspec->qdr_dfe_on = 0;
+
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+ ppd->dd->cspec->r1 ?
+ QDR_STATIC_ADAPT_INIT_R1 :
+ QDR_STATIC_ADAPT_INIT);
+ force_h1(ppd);
+ }
+ }
+ mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+}
+
+/*
+ * If we were using MSIx, try to fall back to INTx.
+ */
+static int qib_7322_intr_fallback(struct qib_devdata *dd)
+{
+ if (!dd->cspec->num_msix_entries)
+ return 0; /* already using INTx */
+
+ qib_devinfo(dd->pcidev, "MSIx interrupt not detected,"
+ " trying INTx interrupts\n");
+ qib_7322_nomsix(dd);
+ qib_enable_intx(dd->pcidev);
+ qib_setup_7322_interrupt(dd, 0);
+ return 1;
+}
+
+/*
+ * Reset the XGXS (between serdes and IBC). Slightly less intrusive
+ * than resetting the IBC or external link state, and useful in some
+ * cases to cause some retraining. To do this right, we reset IBC
+ * as well, then return to the previous state (which may still be in reset).
+ * NOTE: some callers of this "know" this writes the current value
+ * of cpspec->ibcctrl_a as part of its operation, so if that changes,
+ * check all callers.
+ */
+static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
+{
+ u64 val;
+ struct qib_devdata *dd = ppd->dd;
+ const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
+ SYM_MASK(IBPCSConfig_0, xcv_treset) |
+ SYM_MASK(IBPCSConfig_0, tx_rx_reset);
+
+ val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a &
+ ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
+
+ qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
+ qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+}
+
+/*
+ * This code for non-IBTA-compliant IB speed negotiation is only known to
+ * work for the SDR to DDR transition, and only between an HCA and a switch
+ * with recent firmware. It is based on observed heuristics, rather than
+ * actual knowledge of the non-compliant speed negotiation.
+ * It has a number of hard-coded fields, since the hope is to rewrite this
+ * when a spec is available on how the negotiation is intended to work.
+ */
+static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
+ u32 dcnt, u32 *data)
+{
+ int i;
+ u64 pbc;
+ u32 __iomem *piobuf;
+ u32 pnum, control, len;
+ struct qib_devdata *dd = ppd->dd;
+
+ i = 0;
+ len = 7 + dcnt + 1; /* 7-dword header, dcnt dwords of data, 1 dword icrc */
+ control = qib_7322_setpbc_control(ppd, len, 0, 15);
+ pbc = ((u64) control << 32) | len;
+ while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
+ if (i++ > 15)
+ return;
+ udelay(2);
+ }
+ /* disable header check on this packet, since it can't be valid */
+ dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
+ writeq(pbc, piobuf);
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, hdr, 7);
+ qib_pio_copy(piobuf + 9, data, dcnt);
+ if (dd->flags & QIB_USE_SPCL_TRIG) {
+ u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
+
+ qib_flush_wc();
+ __raw_writel(0xaebecede, piobuf + spcl_off);
+ }
+ qib_flush_wc();
+ qib_sendbuf_done(dd, pnum);
+ /* and re-enable hdr check */
+ dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
+}
+
+/*
+ * _start packet gets sent twice at start, _done gets sent twice at end
+ */
+static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
+{
+ struct qib_devdata *dd = ppd->dd;
+ static u32 swapped;
+ u32 dw, i, hcnt, dcnt, *data;
+ static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
+ static u32 madpayload_start[0x40] = {
+ 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+ 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
+ };
+ static u32 madpayload_done[0x40] = {
+ 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+ 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x40000001, 0x1388, 0x15e, /* rest 0's */
+ };
+
+ dcnt = ARRAY_SIZE(madpayload_start);
+ hcnt = ARRAY_SIZE(hdr);
+ if (!swapped) {
+ /* for maintainability, do it at runtime */
+ for (i = 0; i < hcnt; i++) {
+ dw = (__force u32) cpu_to_be32(hdr[i]);
+ hdr[i] = dw;
+ }
+ for (i = 0; i < dcnt; i++) {
+ dw = (__force u32) cpu_to_be32(madpayload_start[i]);
+ madpayload_start[i] = dw;
+ dw = (__force u32) cpu_to_be32(madpayload_done[i]);
+ madpayload_done[i] = dw;
+ }
+ swapped = 1;
+ }
+
+ data = which ? madpayload_done : madpayload_start;
+
+ autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
+ qib_read_kreg64(dd, kr_scratch);
+ udelay(2);
+ autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
+ qib_read_kreg64(dd, kr_scratch);
+ udelay(2);
+}
+
+/*
+ * Do the absolute minimum to cause an IB speed change, and make it
+ * ready, but don't actually trigger the change. The caller will
+ * do that when ready (if the link is in the Polling training state, it
+ * will happen immediately, otherwise when the link next goes down).
+ *
+ * This routine should only be used as part of the DDR autonegotiation
+ * code for devices that are not compliant with IB 1.2 (or code that
+ * fixes things up for same).
+ *
+ * When the link has gone down and autoneg is enabled, or autoneg has
+ * failed and we give up until next time, we set both speeds, and
+ * then we want IBTA enabled as well as "use max enabled speed".
+ */
+static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
+{
+ u64 newctrlb;
+ newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
+ IBA7322_IBC_IBTA_1_2_MASK |
+ IBA7322_IBC_MAX_SPEED_MASK);
+
+ if (speed & (speed - 1)) /* multiple speeds */
+ newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
+ IBA7322_IBC_IBTA_1_2_MASK |
+ IBA7322_IBC_MAX_SPEED_MASK;
+ else
+ newctrlb |= speed == QIB_IB_QDR ?
+ IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
+ ((speed == QIB_IB_DDR ?
+ IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
+
+ if (newctrlb == ppd->cpspec->ibcctrl_b)
+ return;
+
+ ppd->cpspec->ibcctrl_b = newctrlb;
+ qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+}
+
+/*
+ * This routine is only used when we are not talking to another
+ * IB 1.2-compliant device that we think can do DDR.
+ * (This includes all existing switch chips as of Oct 2007.)
+ * 1.2-compliant devices go directly to DDR prior to reaching INIT.
+ */
+static void try_7322_autoneg(struct qib_pportdata *ppd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ qib_autoneg_7322_send(ppd, 0);
+ set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
+ qib_7322_mini_pcs_reset(ppd);
+ /* 2 msec is minimum length of a poll cycle */
+ schedule_delayed_work(&ppd->cpspec->autoneg_work,
+ msecs_to_jiffies(2));
+}
+
+/*
+ * Handle the empirically determined mechanism for auto-negotiation
+ * of DDR speed with switches.
+ */
+static void autoneg_7322_work(struct work_struct *work)
+{
+ struct qib_pportdata *ppd;
+ struct qib_devdata *dd;
+ u64 startms;
+ u32 i;
+ unsigned long flags;
+
+ ppd = container_of(work, struct qib_chippport_specific,
+ autoneg_work.work)->ppd;
+ dd = ppd->dd;
+
+ startms = jiffies_to_msecs(jiffies);
+
+ /*
+ * Busy-wait for this first part; it should be at most a
+ * few hundred usec, since we scheduled ourselves for 2 msec.
+ */
+ for (i = 0; i < 25; i++) {
+ if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
+ == IB_7322_LT_STATE_POLLQUIET) {
+ qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
+ break;
+ }
+ udelay(100);
+ }
+
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ goto done; /* we got there early or told to stop */
+
+ /* we expect this to timeout */
+ if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(90)))
+ goto done;
+ qib_7322_mini_pcs_reset(ppd);
+
+ /* we expect this to timeout */
+ if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(1700)))
+ goto done;
+ qib_7322_mini_pcs_reset(ppd);
+
+ set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
+
+ /*
+ * Wait up to 250 msec for link to train and get to INIT at DDR;
+ * this should terminate early.
+ */
+ wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(250));
+done:
+ if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
+ if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
+ ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
+ ppd->cpspec->autoneg_tries = 0;
+ }
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
+ }
+}
+
+/*
+ * This routine is used to request that the IPG be set in the QLogic switch.
+ * Only called if r1.
+ */
+static void try_7322_ipg(struct qib_pportdata *ppd)
+{
+ struct qib_ibport *ibp = &ppd->ibport_data;
+ struct ib_mad_send_buf *send_buf;
+ struct ib_mad_agent *agent;
+ struct ib_smp *smp;
+ unsigned delay;
+ int ret;
+
+ agent = ibp->send_agent;
+ if (!agent)
+ goto retry;
+
+ send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
+ IB_MGMT_MAD_DATA, GFP_ATOMIC);
+ if (IS_ERR(send_buf))
+ goto retry;
+
+ if (!ibp->smi_ah) {
+ struct ib_ah_attr attr;
+ struct ib_ah *ah;
+
+ memset(&attr, 0, sizeof attr);
+ attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE);
+ attr.port_num = ppd->port;
+ ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
+ if (IS_ERR(ah))
+ ret = -EINVAL;
+ else {
+ send_buf->ah = ah;
+ ibp->smi_ah = to_iah(ah);
+ ret = 0;
+ }
+ } else {
+ send_buf->ah = &ibp->smi_ah->ibah;
+ ret = 0;
+ }
+
+ smp = send_buf->mad;
+ smp->base_version = IB_MGMT_BASE_VERSION;
+ smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
+ smp->class_version = 1;
+ smp->method = IB_MGMT_METHOD_SEND;
+ smp->hop_cnt = 1;
+ smp->attr_id = QIB_VENDOR_IPG;
+ smp->attr_mod = 0;
+
+ if (!ret)
+ ret = ib_post_send_mad(send_buf, NULL);
+ if (ret)
+ ib_free_send_mad(send_buf);
+retry:
+ delay = 2 << ppd->cpspec->ipg_tries;
+ schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
+}
+
+/*
+ * Timeout handler for setting IPG.
+ * Only called if r1.
+ */
+static void ipg_7322_work(struct work_struct *work)
+{
+ struct qib_pportdata *ppd;
+
+ ppd = container_of(work, struct qib_chippport_specific,
+ ipg_work.work)->ppd;
+ if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
+ && ++ppd->cpspec->ipg_tries <= 10)
+ try_7322_ipg(ppd);
+}
+
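+/* Map the IBC LinkState field from ibcstatus_a to an IBTA logical port state */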
+static u32 qib_7322_iblink_state(u64 ibcs)
+{
+ u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
+
+ switch (state) {
+ case IB_7322_L_STATE_INIT:
+ state = IB_PORT_INIT;
+ break;
+ case IB_7322_L_STATE_ARM:
+ state = IB_PORT_ARMED;
+ break;
+ case IB_7322_L_STATE_ACTIVE:
+ /* fall through */
+ case IB_7322_L_STATE_ACT_DEFER:
+ state = IB_PORT_ACTIVE;
+ break;
+ default: /* fall through */
+ case IB_7322_L_STATE_DOWN:
+ state = IB_PORT_DOWN;
+ break;
+ }
+ return state;
+}
+
+/* returns the IBTA port state, rather than the IBC link training state */
+static u8 qib_7322_phys_portstate(u64 ibcs)
+{
+ u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
+ return qib_7322_physportstate[state];
+}
+
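+/*
+ * Called when the IB link transitions up or down. Updates the shadow copy
+ * of link speed/width from ibcstatus, runs the non-IBTA DDR autoneg state
+ * machine, and maintains the symbol-error/link-recovery delta snapshots.
+ * Returns 1 if no further IB status change processing should be done.
+ */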
+static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
+{
+ int ret = 0, symadj = 0;
+ unsigned long flags;
+ int mult;
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+ /* Update our picture of width and speed from chip */
+ if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
+ ppd->link_speed_active = QIB_IB_QDR;
+ mult = 4;
+ } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
+ ppd->link_speed_active = QIB_IB_DDR;
+ mult = 2;
+ } else {
+ ppd->link_speed_active = QIB_IB_SDR;
+ mult = 1;
+ }
+ if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
+ ppd->link_width_active = IB_WIDTH_4X;
+ mult *= 4;
+ } else
+ ppd->link_width_active = IB_WIDTH_1X;
+ ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
+
+ if (!ibup) {
+ u64 clr;
+
+ /* Link went down. */
+ /* do IPG MAD again after linkdown, even if last time failed */
+ ppd->cpspec->ipg_tries = 0;
+ clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
+ (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
+ SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
+ if (clr)
+ qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
+ if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
+ QIBL_IB_AUTONEG_INPROG)))
+ set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ /* unlock the Tx settings, speed may change */
+ qib_write_kreg_port(ppd, krp_tx_deemph_override,
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ reset_tx_deemphasis_override));
+ qib_cancel_sends(ppd);
+ /* on link down, ensure sane pcs state */
+ qib_7322_mini_pcs_reset(ppd);
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ if (__qib_sdma_running(ppd))
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e70_go_idle);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ }
+ clr = read_7322_creg32_port(ppd, crp_iblinkdown);
+ if (clr == ppd->cpspec->iblnkdownsnap)
+ ppd->cpspec->iblnkdowndelta++;
+ } else {
+ if (qib_compat_ddr_negotiate &&
+ !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
+ QIBL_IB_AUTONEG_INPROG)) &&
+ ppd->link_speed_active == QIB_IB_SDR &&
+ (ppd->link_speed_enabled & QIB_IB_DDR)
+ && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
+ /* we are SDR, and auto-negotiation enabled */
+ ++ppd->cpspec->autoneg_tries;
+ if (!ppd->cpspec->ibdeltainprog) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymdelta +=
+ read_7322_creg32_port(ppd,
+ crp_ibsymbolerr) -
+ ppd->cpspec->ibsymsnap;
+ ppd->cpspec->iblnkerrdelta +=
+ read_7322_creg32_port(ppd,
+ crp_iblinkerrrecov) -
+ ppd->cpspec->iblnkerrsnap;
+ }
+ try_7322_autoneg(ppd);
+ ret = 1; /* no other IB status change processing */
+ } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+ ppd->link_speed_active == QIB_IB_SDR) {
+ qib_autoneg_7322_send(ppd, 1);
+ set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
+ qib_7322_mini_pcs_reset(ppd);
+ udelay(2);
+ ret = 1; /* no other IB status change processing */
+ } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+ (ppd->link_speed_active & QIB_IB_DDR)) {
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
+ QIBL_IB_AUTONEG_FAILED);
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ ppd->cpspec->autoneg_tries = 0;
+ /* re-enable SDR, for next link down */
+ set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
+ wake_up(&ppd->cpspec->autoneg_wait);
+ symadj = 1;
+ } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
+ /*
+ * Clear autoneg failure flag, and do setup
+ * so we'll try next time link goes down and
+ * back to INIT (possibly connected to a
+ * different device).
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
+ symadj = 1;
+ }
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ symadj = 1;
+ if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
+ try_7322_ipg(ppd);
+ if (!ppd->cpspec->recovery_init)
+ setup_7322_link_recovery(ppd, 0);
+ ppd->cpspec->qdr_dfe_time = jiffies +
+ msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
+ }
+ ppd->cpspec->ibmalfusesnap = 0;
+ ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
+ crp_errlink);
+ }
+ if (symadj) {
+ ppd->cpspec->iblnkdownsnap =
+ read_7322_creg32_port(ppd, crp_iblinkdown);
+ if (ppd->cpspec->ibdeltainprog) {
+ ppd->cpspec->ibdeltainprog = 0;
+ ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
+ crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
+ ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
+ crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
+ }
+ } else if (!ibup && qib_compat_ddr_negotiate &&
+ !ppd->cpspec->ibdeltainprog &&
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
+ crp_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
+ crp_iblinkerrrecov);
+ }
+
+ if (!ret)
+ qib_setup_7322_setextled(ppd, ibup);
+ return ret;
+}
+
+/*
+ * Does read/modify/write to appropriate registers to
+ * set output and direction bits selected by mask.
+ * These are in their canonical positions (e.g. lsb of
+ * dir will end up in D48 of extctrl on existing chips).
+ * Returns contents of GP Inputs.
+ */
+static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
+{
+ u64 read_val, new_out;
+ unsigned long flags;
+
+ if (mask) {
+ /* some bits being written, lock access to GPIO */
+ dir &= mask;
+ out &= mask;
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
+ dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
+ new_out = (dd->cspec->gpio_out & ~mask) | out;
+
+ qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+ qib_write_kreg(dd, kr_gpio_out, new_out);
+ dd->cspec->gpio_out = new_out;
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+ }
+ /*
+ * It is unlikely that a read at this time would get valid
+ * data on a pin whose direction line was set in the same
+ * call to this function. We include the read here because
+ * that allows us to potentially combine a change on one pin with
+ * a read on another, and because the old code did something like
+ * this.
+ */
+ read_val = qib_read_kreg64(dd, kr_extstatus);
+ return SYM_FIELD(read_val, EXTStatus, GPIOIn);
+}
+
+/* Enable writes to config EEPROM, if possible. Returns previous state */
+static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
+{
+ int prev_wen;
+ u32 mask;
+
+ mask = 1 << QIB_EEPROM_WEN_NUM;
+ prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
+ gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
+
+ return prev_wen & 1;
+}
+
+/*
+ * Read fundamental info we need to use the chip. These are
+ * the registers that describe chip capabilities, and are
+ * saved in shadow registers.
+ */
+static void get_7322_chip_params(struct qib_devdata *dd)
+{
+ u64 val;
+ u32 piobufs;
+ int mtu;
+
+ dd->palign = qib_read_kreg32(dd, kr_pagealign);
+
+ dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
+
+ dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
+ dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
+ dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
+ dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
+ dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
+
+ val = qib_read_kreg64(dd, kr_sendpiobufcnt);
+ dd->piobcnt2k = val & ~0U;
+ dd->piobcnt4k = val >> 32;
+ val = qib_read_kreg64(dd, kr_sendpiosize);
+ dd->piosize2k = val & ~0U;
+ dd->piosize4k = val >> 32;
+
+ mtu = ib_mtu_enum_to_int(qib_ibmtu);
+ if (mtu == -1)
+ mtu = QIB_DEFAULT_MTU;
+ dd->pport[0].ibmtu = (u32)mtu;
+ dd->pport[1].ibmtu = (u32)mtu;
+
+ /* these may be adjusted in init_chip_wc_pat() */
+ dd->pio2kbase = (u32 __iomem *)
+ ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
+ dd->pio4kbase = (u32 __iomem *)
+ ((char __iomem *) dd->kregbase +
+ (dd->piobufbase >> 32));
+ /*
+ * 4K buffers take 2 pages; we use roundup just to be
+ * paranoid; we calculate it once here, rather than on
+ * every buffer allocation.
+ */
+ dd->align4k = ALIGN(dd->piosize4k, dd->palign);
+
+ piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
+
+ dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
+ (sizeof(u64) * BITS_PER_BYTE / 2);
+}
+
+/*
+ * The chip base addresses in cspec and cpspec have to be set
+ * after possible init_chip_wc_pat(), rather than in
+ * get_7322_chip_params(), so split out as a separate function.
+ */
+static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
+{
+ u32 cregbase;
+ cregbase = qib_read_kreg32(dd, kr_counterregbase);
+
+ dd->cspec->cregbase = (u64 __iomem *)(cregbase +
+ (char __iomem *)dd->kregbase);
+
+ dd->egrtidbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase + dd->rcvegrbase);
+
+ /* port registers are defined as relative to base of chip */
+ dd->pport[0].cpspec->kpregbase =
+ (u64 __iomem *)((char __iomem *)dd->kregbase);
+ dd->pport[1].cpspec->kpregbase =
+ (u64 __iomem *)(dd->palign +
+ (char __iomem *)dd->kregbase);
+ dd->pport[0].cpspec->cpregbase =
+ (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
+ kr_counterregbase) + (char __iomem *)dd->kregbase);
+ dd->pport[1].cpspec->cpregbase =
+ (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
+ kr_counterregbase) + (char __iomem *)dd->kregbase);
+}
+
+/*
+ * This is a fairly special-purpose observer, so we only support
+ * the port-specific parts of SendCtrl.
+ */
+
+#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \
+ SYM_MASK(SendCtrl_0, SDmaEnable) | \
+ SYM_MASK(SendCtrl_0, SDmaIntEnable) | \
+ SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
+ SYM_MASK(SendCtrl_0, SDmaHalt) | \
+ SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \
+ SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
+
+static int sendctrl_hook(struct qib_devdata *dd,
+ const struct diag_observer *op, u32 offs,
+ u64 *data, u64 mask, int only_32)
+{
+ unsigned long flags;
+ unsigned idx;
+ unsigned pidx;
+ struct qib_pportdata *ppd = NULL;
+ u64 local_data, all_bits;
+
+ /*
+ * The fixed correspondence between Physical ports and pports is
+ * severed. We need to hunt for the ppd that corresponds
+ * to the offset we got. And we have to do that without admitting
+ * we know the stride, apparently.
+ */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ u64 __iomem *psptr;
+ u32 psoffs;
+
+ ppd = dd->pport + pidx;
+ if (!ppd->cpspec->kpregbase)
+ continue;
+
+ psptr = ppd->cpspec->kpregbase + krp_sendctrl;
+ psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
+ if (psoffs == offs)
+ break;
+ }
+
+ /* If pport is not being managed by driver, just avoid shadows. */
+ if (pidx >= dd->num_pports)
+ ppd = NULL;
+
+ /* In any case, "idx" is flat index in kreg space */
+ idx = offs / sizeof(u64);
+
+ all_bits = ~0ULL;
+ if (only_32)
+ all_bits >>= 32;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (!ppd || (mask & all_bits) != all_bits) {
+ /*
+ * At least some mask bits are zero, so we need
+ * to read. The judgement call is whether from
+ * reg or shadow. First-cut: read reg, and complain
+ * if any bits which should be shadowed are different
+ * from their shadowed value.
+ */
+ if (only_32)
+ local_data = (u64)qib_read_kreg32(dd, idx);
+ else
+ local_data = qib_read_kreg64(dd, idx);
+ *data = (local_data & ~mask) | (*data & mask);
+ }
+ if (mask) {
+ /*
+ * At least some mask bits are one, so we need
+ * to write, but only shadow some bits.
+ */
+ u64 sval, tval; /* Shadowed, transient */
+
+ /*
+ * New shadow val is bits we don't want to touch,
+ * ORed with bits we do, that are intended for shadow.
+ */
+ if (ppd) {
+ sval = ppd->p_sendctrl & ~mask;
+ sval |= *data & SENDCTRL_SHADOWED & mask;
+ ppd->p_sendctrl = sval;
+ } else
+ sval = *data & SENDCTRL_SHADOWED & mask;
+ tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
+ qib_write_kreg(dd, idx, tval);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ }
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ return only_32 ? 4 : 8;
+}
+
+static const struct diag_observer sendctrl_0_observer = {
+ sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
+ KREG_IDX(SendCtrl_0) * sizeof(u64)
+};
+
+static const struct diag_observer sendctrl_1_observer = {
+ sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
+ KREG_IDX(SendCtrl_1) * sizeof(u64)
+};
+
+static ushort sdma_fetch_prio = 8;
+module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
+MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
+
+/* Besides logging QSFP events, we set appropriate TxDDS values */
+static void init_txdds_table(struct qib_pportdata *ppd, int override);
+
+static void qsfp_7322_event(struct work_struct *work)
+{
+ struct qib_qsfp_data *qd;
+ struct qib_pportdata *ppd;
+ u64 pwrup;
+ int ret;
+ u32 le2;
+
+ qd = container_of(work, struct qib_qsfp_data, work);
+ ppd = qd->ppd;
+ pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC);
+
+ /*
+ * Some QSFPs not only do not respond until the full power-up
+ * time, but may behave badly if we try. So hold off responding
+ * to insertion.
+ */
+ while (1) {
+ u64 now = get_jiffies_64();
+ if (time_after64(now, pwrup))
+ break;
+ msleep(1);
+ }
+ ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
+ /*
+ * Need to change LE2 back to defaults if we couldn't
+ * read the cable type (to handle cable swaps), so do this
+ * even on failure to read cable information. We don't
+ * get here for QME, so IS_QME check not needed here.
+ */
+ le2 = (!ret && qd->cache.atten[1] >= qib_long_atten &&
+ !ppd->dd->cspec->r1 && QSFP_IS_CU(qd->cache.tech)) ?
+ LE2_5m : LE2_DEFAULT;
+ ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
+ init_txdds_table(ppd, 0);
+}
+
+/*
+ * There is little we can do but complain to the user if QSFP
+ * initialization fails.
+ */
+static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
+{
+ unsigned long flags;
+ struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
+ struct qib_devdata *dd = ppd->dd;
+ u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
+
+ mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
+ qd->ppd = ppd;
+ qib_qsfp_init(qd, qsfp_7322_event);
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
+ dd->cspec->gpio_mask |= mod_prs_bit;
+ qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+}
+
+/*
+ * Called at device initialization time, and also if the txselect
+ * module parameter is changed. This is used for cables that don't
+ * have valid QSFP EEPROMs (not present, or attenuation is zero).
+ * We initialize to the default, then if there is a specific
+ * unit,port match, we use that (and set it immediately, for the
+ * current speed, if the link is at INIT or better).
+ * String format is "default# unit#,port#=# ... u,p=#", separators must
+ * be a SPACE character. A newline terminates. The u,p=# tuples may
+ * optionally have "u,p=#,#", where the final # is the H1 value.
+ * The last specific match is used (actually, all are used, but last
+ * one is the one that winds up set); if none at all, fall back on default.
+ */
+static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
+{
+ char *nxt, *str;
+ u32 pidx, unit, port, deflt, h1;
+ unsigned long val;
+ int any = 0, seth1;
+
+ str = txselect_list;
+
+ /* default number is validated in setup_txselect() */
+ deflt = simple_strtoul(str, &nxt, 0);
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ dd->pport[pidx].cpspec->no_eep = deflt;
+
+ while (*nxt && nxt[1]) {
+ str = ++nxt;
+ unit = simple_strtoul(str, &nxt, 0);
+ if (nxt == str || !*nxt || *nxt != ',') {
+ while (*nxt && *nxt++ != ' ') /* skip to next, if any */
+ ;
+ continue;
+ }
+ str = ++nxt;
+ port = simple_strtoul(str, &nxt, 0);
+ if (nxt == str || *nxt != '=') {
+ while (*nxt && *nxt++ != ' ') /* skip to next, if any */
+ ;
+ continue;
+ }
+ str = ++nxt;
+ val = simple_strtoul(str, &nxt, 0);
+ if (nxt == str) {
+ while (*nxt && *nxt++ != ' ') /* skip to next, if any */
+ ;
+ continue;
+ }
+ if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)
+ continue;
+ seth1 = 0;
+ h1 = 0; /* gcc thinks it might be used uninitialized */
+ if (*nxt == ',' && nxt[1]) {
+ str = ++nxt;
+ h1 = (u32)simple_strtoul(str, &nxt, 0);
+ if (nxt == str)
+ while (*nxt && *nxt++ != ' ') /* skip */
+ ;
+ else
+ seth1 = 1;
+ }
+ for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
+ ++pidx) {
+ struct qib_pportdata *ppd = &dd->pport[pidx];
+
+ if (ppd->port != port || !ppd->link_speed_supported)
+ continue;
+ ppd->cpspec->no_eep = val;
+ /* now change the IBC and serdes, overriding generic */
+ init_txdds_table(ppd, 1);
+ any++;
+ }
+ if (*nxt == '\n')
+ break; /* done */
+ }
+ if (change && !any) {
+ /* no specific setting, use the default.
+ * Change the IBC and serdes, but since it's
+ * general, don't override specific settings.
+ */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ if (dd->pport[pidx].link_speed_supported)
+ init_txdds_table(&dd->pport[pidx], 0);
+ }
+}
+
+/* handle the txselect parameter changing */
+static int setup_txselect(const char *str, struct kernel_param *kp)
+{
+ struct qib_devdata *dd;
+ unsigned long val;
+ char *n;
+ if (strlen(str) >= MAX_ATTEN_LEN) {
+ printk(KERN_INFO QIB_DRV_NAME " txselect_values string "
+ "too long\n");
+ return -ENOSPC;
+ }
+ val = simple_strtoul(str, &n, 0);
+ if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
+ printk(KERN_INFO QIB_DRV_NAME
+ "txselect_values must start with a number < %d\n",
+ TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
+ return -EINVAL;
+ }
+ strcpy(txselect_list, str);
+
+ list_for_each_entry(dd, &qib_dev_list, list)
+ if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
+ set_no_qsfp_atten(dd, 1);
+ return 0;
+}
+
+/*
+ * Write the final few registers that depend on some of the
+ * init setup. Done late in init, just before bringing up
+ * the serdes.
+ */
+static int qib_late_7322_initreg(struct qib_devdata *dd)
+{
+ int ret = 0, n;
+ u64 val;
+
+ qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
+ qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
+ qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
+ qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
+ val = qib_read_kreg64(dd, kr_sendpioavailaddr);
+ if (val != dd->pioavailregs_phys) {
+ qib_dev_err(dd, "Catastrophic software error, "
+ "SendPIOAvailAddr written as %lx, "
+ "read back as %llx\n",
+ (unsigned long) dd->pioavailregs_phys,
+ (unsigned long long) val);
+ ret = -EINVAL;
+ }
+
+ n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
+ qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
+ /* driver sends get pkey, lid, etc. checking also, to catch bugs */
+ qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
+
+ qib_register_observer(dd, &sendctrl_0_observer);
+ qib_register_observer(dd, &sendctrl_1_observer);
+
+ dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
+ qib_write_kreg(dd, kr_control, dd->control);
+ /*
+ * Set SendDmaFetchPriority and init Tx params, including
+ * QSFP handler on boards that have QSFP.
+ * First set our default attenuation entry for cables that
+ * don't have valid attenuation.
+ */
+ set_no_qsfp_atten(dd, 0);
+ for (n = 0; n < dd->num_pports; ++n) {
+ struct qib_pportdata *ppd = dd->pport + n;
+
+ qib_write_kreg_port(ppd, krp_senddmaprioritythld,
+ sdma_fetch_prio & 0xf);
+ /* Initialize qsfp if present on board. */
+ if (dd->flags & QIB_HAS_QSFP)
+ qib_init_7322_qsfp(ppd);
+ }
+ dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
+ qib_write_kreg(dd, kr_control, dd->control);
+
+ return ret;
+}
+
+/* per IB port errors. */
+#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
+ MASK_ACROSS(8, 15))
+#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
+#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
+ MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
+ MASK_ACROSS(0, 11))
+
+/*
+ * Write the initialization per-port registers that need to be done at
+ * driver load and after reset completes (i.e., that aren't done as part
+ * of other init procedures called from qib_init.c).
+ * Some of these should be redundant on reset, but play safe.
+ */
+static void write_7322_init_portregs(struct qib_pportdata *ppd)
+{
+ u64 val;
+ int i;
+
+ if (!ppd->link_speed_supported) {
+ /* no buffer credits for this port */
+ for (i = 1; i < 8; i++)
+ qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
+ qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+ return;
+ }
+
+ /*
+ * Set the number of supported virtual lanes in IBC,
+ * for flow control packet handling on unsupported VLs
+ */
+ val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
+ val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
+ val |= (u64)(ppd->vls_supported - 1) <<
+ SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
+ qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
+
+ qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
+
+ /* enable tx header checking */
+ qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
+ IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
+ IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
+
+ qib_write_kreg_port(ppd, krp_ncmodectrl,
+ SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
+
+ /*
+ * Unconditionally clear the bufmask bits. If SDMA is
+ * enabled, we'll set them appropriately later.
+ */
+ qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
+ qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
+ qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
+ if (ppd->dd->cspec->r1)
+ ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
+}
+
+/*
+ * Write the initialization per-device registers that need to be done at
+ * driver load and after reset completes (i.e., that aren't done as part
+ * of other init procedures called from qib_init.c). Also write per-port
+ * registers that are affected by overall device config, such as QP mapping.
+ * Some of these should be redundant on reset, but play safe.
+ */
+static void write_7322_initregs(struct qib_devdata *dd)
+{
+ struct qib_pportdata *ppd;
+ int i, pidx;
+ u64 val;
+
+ /* Set Multicast QPs received by port 2 to map to context one. */
+ qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ unsigned n, regno;
+ unsigned long flags;
+
+ if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported)
+ continue;
+
+ ppd = &dd->pport[pidx];
+
+ /* be paranoid against later code motion, etc. */
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+ ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+ /* Initialize QP to context mapping */
+ regno = krp_rcvqpmaptable;
+ val = 0;
+ if (dd->num_pports > 1)
+ n = dd->first_user_ctxt / dd->num_pports;
+ else
+ n = dd->first_user_ctxt - 1;
+ for (i = 0; i < 32; ) {
+ unsigned ctxt;
+
+ if (dd->num_pports > 1)
+ ctxt = (i % n) * dd->num_pports + pidx;
+ else if (i % n)
+ ctxt = (i % n) + 1;
+ else
+ ctxt = ppd->hw_pidx;
+ val |= ctxt << (5 * (i % 6));
+ i++;
+ if (i % 6 == 0) {
+ qib_write_kreg_port(ppd, regno, val);
+ val = 0;
+ regno++;
+ }
+ }
+ qib_write_kreg_port(ppd, regno, val);
+ }
+
+ /*
+ * Set up interrupt mitigation for kernel contexts, but
+ * not user contexts (user contexts use interrupts when
+ * stalled waiting for any packet, so we want those interrupts
+ * right away).
+ */
+ for (i = 0; i < dd->first_user_ctxt; i++) {
+ dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
+ qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
+ }
+
+ /*
+ * Initialize as (disabled) rcvflow tables. Application code
+ * will set up each flow as it uses the flow.
+ * Doesn't clear any of the error bits that might be set.
+ */
+ val = TIDFLOW_ERRBITS; /* these are W1C */
+ for (i = 0; i < dd->ctxtcnt; i++) {
+ int flow;
+ for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
+ qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
+ }
+
+ /*
+ * Dual cards init to dual port recovery, single port cards to
+ * the one port. Dual port cards may later adjust to 1 port,
+ * and then back to dual port if both ports are connected.
+ */
+ if (dd->num_pports)
+ setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
+}
+
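+/*
+ * Lay out and initialize the per-device and per-port data structures,
+ * read the chip parameters, and set feature flags, VL configuration,
+ * counters, and send-buffer accounting. Called once, early in
+ * chip-specific initialization.
+ */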
+static int qib_init_7322_variables(struct qib_devdata *dd)
+{
+ struct qib_pportdata *ppd;
+ unsigned features, pidx, sbufcnt;
+ int ret, mtu;
+ u32 sbufs, updthresh;
+
+ /* pport structs are contiguous, allocated after devdata */
+ ppd = (struct qib_pportdata *)(dd + 1);
+ dd->pport = ppd;
+ ppd[0].dd = dd;
+ ppd[1].dd = dd;
+
+ dd->cspec = (struct qib_chip_specific *)(ppd + 2);
+
+ ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
+ ppd[1].cpspec = &ppd[0].cpspec[1];
+ ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
+ ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
+
+ spin_lock_init(&dd->cspec->rcvmod_lock);
+ spin_lock_init(&dd->cspec->gpio_lock);
+
+ /* we haven't yet set QIB_PRESENT, so use read directly */
+ dd->revision = readq(&dd->kregbase[kr_revision]);
+
+ if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
+ qib_dev_err(dd, "Revision register read failure, "
+ "giving up initialization\n");
+ ret = -ENODEV;
+ goto bail;
+ }
+ dd->flags |= QIB_PRESENT; /* now register routines work */
+
+ dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
+ dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
+ dd->cspec->r1 = dd->minrev == 1;
+
+ get_7322_chip_params(dd);
+ features = qib_7322_boardname(dd);
+
+ /* now that piobcnt2k and 4k set, we can allocate these */
+ sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
+ NUM_VL15_BUFS + BITS_PER_LONG - 1;
+ sbufcnt /= BITS_PER_LONG;
+ dd->cspec->sendchkenable = kmalloc(sbufcnt *
+ sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
+ dd->cspec->sendgrhchk = kmalloc(sbufcnt *
+ sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
+ dd->cspec->sendibchk = kmalloc(sbufcnt *
+ sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
+ if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
+ !dd->cspec->sendibchk) {
+ qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ ppd = dd->pport;
+
+ /*
+ * GPIO bits for TWSI data and clock,
+ * used for serial EEPROM.
+ */
+ dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
+ dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
+ dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
+
+ dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
+ QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
+ QIB_HAS_THRESH_UPDATE |
+ (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
+ dd->flags |= qib_special_trigger ?
+ QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
+
+ /*
+ * Set up initial values. These may change when PAT is enabled, but
+ * we need these to do initial chip register accesses.
+ */
+ qib_7322_set_baseaddrs(dd);
+
+ mtu = ib_mtu_enum_to_int(qib_ibmtu);
+ if (mtu == -1)
+ mtu = QIB_DEFAULT_MTU;
+
+ dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
+ /* all hwerrors become interrupts, unless special purposed */
+ dd->cspec->hwerrmask = ~0ULL;
+ /* link_recovery setup causes these errors, so ignore them,
+ * other than clearing them when they occur */
+ dd->cspec->hwerrmask &=
+ ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
+ SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
+ HWE_MASK(LATriggered));
+
+ for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
+ struct qib_chippport_specific *cp = ppd->cpspec;
+ ppd->link_speed_supported = features & PORT_SPD_CAP;
+ features >>= PORT_SPD_CAP_SHIFT;
+ if (!ppd->link_speed_supported) {
+ /* single port mode (7340, or configured) */
+ dd->skip_kctxt_mask |= 1 << pidx;
+ if (pidx == 0) {
+ /* Make sure port is disabled. */
+ qib_write_kreg_port(ppd, krp_rcvctrl, 0);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
+ ppd[0] = ppd[1];
+ dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
+ IBSerdesPClkNotDetectMask_0)
+ | SYM_MASK(HwErrMask,
+ SDmaMemReadErrMask_0));
+ dd->cspec->int_enable_mask &= ~(
+ SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
+ SYM_MASK(IntMask, SDmaIdleIntMask_0) |
+ SYM_MASK(IntMask, SDmaProgressIntMask_0) |
+ SYM_MASK(IntMask, SDmaIntMask_0) |
+ SYM_MASK(IntMask, ErrIntMask_0) |
+ SYM_MASK(IntMask, SendDoneIntMask_0));
+ } else {
+ /* Make sure port is disabled. */
+ qib_write_kreg_port(ppd, krp_rcvctrl, 0);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
+ dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
+ IBSerdesPClkNotDetectMask_1)
+ | SYM_MASK(HwErrMask,
+ SDmaMemReadErrMask_1));
+ dd->cspec->int_enable_mask &= ~(
+ SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
+ SYM_MASK(IntMask, SDmaIdleIntMask_1) |
+ SYM_MASK(IntMask, SDmaProgressIntMask_1) |
+ SYM_MASK(IntMask, SDmaIntMask_1) |
+ SYM_MASK(IntMask, ErrIntMask_1) |
+ SYM_MASK(IntMask, SendDoneIntMask_1));
+ }
+ continue;
+ }
+
+ dd->num_pports++;
+ qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
+
+ ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
+ ppd->link_width_enabled = IB_WIDTH_4X;
+ ppd->link_speed_enabled = ppd->link_speed_supported;
+ /*
+ * Set the initial values to reasonable defaults; they will be set
+ * for real when the link is up.
+ */
+ ppd->link_width_active = IB_WIDTH_4X;
+ ppd->link_speed_active = QIB_IB_SDR;
+ ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
+ switch (qib_num_cfg_vls) {
+ case 1:
+ ppd->vls_supported = IB_VL_VL0;
+ break;
+ case 2:
+ ppd->vls_supported = IB_VL_VL0_1;
+ break;
+ default:
+ qib_devinfo(dd->pcidev,
+ "Invalid num_vls %u, using 4 VLs\n",
+ qib_num_cfg_vls);
+ qib_num_cfg_vls = 4;
+ /* fall through */
+ case 4:
+ ppd->vls_supported = IB_VL_VL0_3;
+ break;
+ case 8:
+ if (mtu <= 2048)
+ ppd->vls_supported = IB_VL_VL0_7;
+ else {
+ qib_devinfo(dd->pcidev,
+ "Invalid num_vls %u for MTU %d "
+ ", using 4 VLs\n",
+ qib_num_cfg_vls, mtu);
+ ppd->vls_supported = IB_VL_VL0_3;
+ qib_num_cfg_vls = 4;
+ }
+ break;
+ }
+ ppd->vls_operational = ppd->vls_supported;
+
+ init_waitqueue_head(&cp->autoneg_wait);
+ INIT_DELAYED_WORK(&cp->autoneg_work,
+ autoneg_7322_work);
+ if (ppd->dd->cspec->r1)
+ INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
+
+ /*
+ * For Mez and similar cards, no qsfp info, so do
+ * the "cable info" setup here. Can be overridden
+ * in adapter-specific routines.
+ */
+ if (!(ppd->dd->flags & QIB_HAS_QSFP)) {
+ if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd))
+ qib_devinfo(ppd->dd->pcidev, "IB%u:%u: "
+ "Unknown mezzanine card type\n",
+ dd->unit, ppd->port);
+ cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
+ /*
+ * Choose center value as default tx serdes setting
+ * until changed through module parameter.
+ */
+ ppd->cpspec->no_eep = IS_QMH(dd) ?
+ TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
+ } else
+ cp->h1_val = H1_FORCE_VAL;
+
+ /* Avoid writes to chip for mini_init */
+ if (!qib_mini_init)
+ write_7322_init_portregs(ppd);
+
+ init_timer(&cp->chase_timer);
+ cp->chase_timer.function = reenable_chase;
+ cp->chase_timer.data = (unsigned long)ppd;
+
+ ppd++;
+ }
+
+ dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
+ dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+ dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
+
+ /* we always allocate at least 2048 bytes for eager buffers */
+ dd->rcvegrbufsize = max(mtu, 2048);
+
+ qib_7322_tidtemplate(dd);
+
+ /*
+ * We can request a receive interrupt for 1 or
+ * more packets from current offset.
+ */
+ dd->rhdrhead_intr_off =
+ (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
+
+ /* setup the stats timer; the add_timer is done at end of init */
+ init_timer(&dd->stats_timer);
+ dd->stats_timer.function = qib_get_7322_faststats;
+ dd->stats_timer.data = (unsigned long) dd;
+
+ dd->ureg_align = 0x10000; /* 64KB alignment */
+
+ dd->piosize2kmax_dwords = dd->piosize2k >> 2;
+
+ qib_7322_config_ctxts(dd);
+ qib_set_ctxtcnt(dd);
+
+ if (qib_wc_pat) {
+ ret = init_chip_wc_pat(dd, NUM_VL15_BUFS * dd->align4k);
+ if (ret)
+ goto bail;
+ }
+ qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
+
+ ret = 0;
+ if (qib_mini_init)
+ goto bail;
+ if (!dd->num_pports) {
+ qib_dev_err(dd, "No ports enabled, giving up initialization\n");
+ goto bail; /* no error, so can still figure out why err */
+ }
+
+ write_7322_initregs(dd);
+ ret = qib_create_ctxts(dd);
+ init_7322_cntrnames(dd);
+
+ updthresh = 8U; /* update threshold */
+
+ /* Use all of the 4KB buffers for kernel SDMA, zero if !SDMA.
+ * Reserve the update threshold amount, or 3, whichever is greater,
+ * for other kernel use, such as sending SMI, MAD, and ACKs,
+ * unless we aren't enabling SDMA, in which case we want to use
+ * all the 4k bufs for the kernel.
+ * If this were less than the update threshold, we could wait
+ * a long time for an update. Coded this way because we
+ * sometimes change the update threshold for various reasons,
+ * and we want this to remain robust.
+ */
+ if (dd->flags & QIB_HAS_SEND_DMA) {
+ dd->cspec->sdmabufcnt = dd->piobcnt4k;
+ sbufs = updthresh > 3 ? updthresh : 3;
+ } else {
+ dd->cspec->sdmabufcnt = 0;
+ sbufs = dd->piobcnt4k;
+ }
+ dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
+ dd->cspec->sdmabufcnt;
+ dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
+ dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
+ dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
+ dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
+
+ /*
+ * If we have 16 user contexts, we will have 7 sbufs
+ * per context, so reduce the update threshold to match. We
+ * want to update before we actually run out, so at low pbufs/ctxt
+ * give ourselves some margin.
+ */
+ if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
+ updthresh = dd->pbufsctxt - 2;
+ dd->cspec->updthresh_dflt = updthresh;
+ dd->cspec->updthresh = updthresh;
+
+ /* before full enable, no interrupts, no locking needed */
+ dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
+ << SYM_LSB(SendCtrl, AvailUpdThld)) |
+ SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
+
+ dd->psxmitwait_supported = 1;
+ dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
+bail:
+ if (!dd->ctxtcnt)
+ dd->ctxtcnt = 1; /* for other initialization code */
+
+ return ret;
+}
+
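+/*
+ * Pick a PIO send buffer for this packet: the dedicated per-port VL15
+ * buffer if the PBC requests a VL15 send, otherwise a 2K or 4K buffer
+ * range depending on the packet length.
+ */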
+static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
+ u32 *pbufnum)
+{
+ u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
+ struct qib_devdata *dd = ppd->dd;
+
+ /* last is same for 2k and 4k, because we use 4k if all 2k busy */
+ if (pbc & PBC_7322_VL15_SEND) {
+ first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
+ last = first;
+ } else {
+ if ((plen + 1) > dd->piosize2kmax_dwords)
+ first = dd->piobcnt2k;
+ else
+ first = 0;
+ last = dd->cspec->lastbuf_for_pio;
+ }
+ return qib_getsendbuf_range(dd, pbufnum, first, last);
+}
+
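+/* Program the counter-sampling interval and start values for this port */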
+static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
+ u32 start)
+{
+ qib_write_kreg_port(ppd, krp_psinterval, intv);
+ qib_write_kreg_port(ppd, krp_psstart, start);
+}
+
+/*
+ * Must be called with sdma_lock held, or before init finished.
+ */
+static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
+{
+ qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
+}
+
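+/*
+ * Hardware control settings to apply for each SDMA state, indexed by
+ * qib_sdma_state; used by the common SDMA state machine code when the
+ * state changes.
+ */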
+static struct sdma_set_state_action sdma_7322_action_table[] = {
+ [qib_sdma_state_s00_hw_down] = {
+ .go_s99_running_tofalse = 1,
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .op_drain = 0,
+ },
+ [qib_sdma_state_s10_hw_start_up_wait] = {
+ .op_enable = 0,
+ .op_intenable = 1,
+ .op_halt = 1,
+ .op_drain = 0,
+ },
+ [qib_sdma_state_s20_idle] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ .op_drain = 0,
+ },
+ [qib_sdma_state_s30_sw_clean_up_wait] = {
+ .op_enable = 0,
+ .op_intenable = 1,
+ .op_halt = 1,
+ .op_drain = 0,
+ },
+ [qib_sdma_state_s40_hw_clean_up_wait] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ .op_drain = 0,
+ },
+ [qib_sdma_state_s50_hw_halt_wait] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ .op_drain = 1,
+ },
+ [qib_sdma_state_s99_running] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 0,
+ .op_drain = 0,
+ .go_s99_running_totrue = 1,
+ },
+};
+
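+/* Point the generic SDMA state machine at the 7322-specific action table */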
+static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
+{
+ ppd->sdma_state.set_state_action = sdma_7322_action_table;
+}
+
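+/*
+ * Program the per-port SDMA registers (descriptor queue base, tail,
+ * head address, etc.) and build the senddmabufmask registers covering
+ * this port's share of the 4K send buffers.
+ */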
+static int init_sdma_7322_regs(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ unsigned lastbuf, erstbuf;
+ u64 senddmabufmask[3] = { 0 };
+ int n, ret = 0;
+
+ qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
+ qib_sdma_7322_setlengen(ppd);
+ qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
+ qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
+ qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
+ qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
+
+ if (dd->num_pports)
+ n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
+ else
+ n = dd->cspec->sdmabufcnt; /* failsafe for init */
+ erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
+ ((dd->num_pports == 1 || ppd->port == 2) ? n :
+ dd->cspec->sdmabufcnt);
+ lastbuf = erstbuf + n;
+
+ ppd->sdma_state.first_sendbuf = erstbuf;
+ ppd->sdma_state.last_sendbuf = lastbuf;
+ for (; erstbuf < lastbuf; ++erstbuf) {
+ unsigned word = erstbuf / BITS_PER_LONG;
+ unsigned bit = erstbuf & (BITS_PER_LONG - 1);
+
+ BUG_ON(word >= 3);
+ senddmabufmask[word] |= 1ULL << bit;
+ }
+ qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
+ qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
+ qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
+ return ret;
+}
+
+/* sdma_lock must be held */
+static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int sane;
+ int use_dmahead;
+ u16 swhead;
+ u16 swtail;
+ u16 cnt;
+ u16 hwhead;
+
+ use_dmahead = __qib_sdma_running(ppd) &&
+ (dd->flags & QIB_HAS_SDMA_TIMEOUT);
+retry:
+ hwhead = use_dmahead ?
+ (u16) le64_to_cpu(*ppd->sdma_head_dma) :
+ (u16) qib_read_kreg_port(ppd, krp_senddmahead);
+
+ swhead = ppd->sdma_descq_head;
+ swtail = ppd->sdma_descq_tail;
+ cnt = ppd->sdma_descq_cnt;
+
+ if (swhead < swtail)
+ /* not wrapped */
+ sane = (hwhead >= swhead) && (hwhead <= swtail);
+ else if (swhead > swtail)
+ /* wrapped around */
+ sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
+ (hwhead <= swtail);
+ else
+ /* empty */
+ sane = (hwhead == swhead);
+
+ if (unlikely(!sane)) {
+ if (use_dmahead) {
+ /* try one more time, directly from the register */
+ use_dmahead = 0;
+ goto retry;
+ }
+ /* proceed as if no progress */
+ hwhead = swhead;
+ }
+
+ return hwhead;
+}
+
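+/*
+ * Return nonzero while the SDMA engine still has work in flight: a drain
+ * or halt in progress, not yet internally halted, or scoreboard not empty.
+ */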
+static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
+{
+ u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
+
+ return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
+ (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
+ !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
+ !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
+}
+
+/*
+ * Compute the amount of delay before sending the next packet if the
+ * port's send rate differs from the static rate set for the QP.
+ * The delay affects the next packet, and the amount of the delay is
+ * based on the length of this packet.
+ */
+static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
+ u8 srate, u8 vl)
+{
+ u8 snd_mult = ppd->delay_mult;
+ u8 rcv_mult = ib_rate_to_delay[srate];
+ u32 ret;
+
+ ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
+
+ /* Indicate VL15, else set the VL in the control word */
+ if (vl == 15)
+ ret |= PBC_7322_VL15_SEND_CTRL;
+ else
+ ret |= vl << PBC_VL_NUM_LSB;
+ ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
+
+ return ret;
+}
+
+/*
+ * Enable the per-port VL15 send buffers for use.
+ * They follow the rest of the buffers, without a config parameter.
+ * This was in initregs, but that is done before the shadow
+ * is set up, and this has to be done after the shadow is
+ * set up.
+ */
+static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
+{
+ unsigned vl15bufs;
+
+ vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
+ qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
+ TXCHK_CHG_TYPE_KERN, NULL);
+}
+
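+/*
+ * Set the eager buffer count and TID base for a receive context: the
+ * kernel contexts (one per IB port) share KCTXT0_EGRCNT, user contexts
+ * get cspec->rcvegrcnt each and follow after.
+ */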
+static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
+{
+ if (rcd->ctxt < NUM_IB_PORTS) {
+ if (rcd->dd->num_pports > 1) {
+ rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
+ rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
+ } else {
+ rcd->rcvegrcnt = KCTXT0_EGRCNT;
+ rcd->rcvegr_tid_base = 0;
+ }
+ } else {
+ rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
+ rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
+ (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
+ }
+}
+
+#define QTXSLEEPS 5000
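+/*
+ * Change the send-buffer checking type (kernel, user, or diag
+ * enable/disable) for a range of PIO buffers. If an rcd is supplied,
+ * first wait for any of the affected buffers that are still busy, then
+ * update the chip's check, GRH, and IB-packet masks to match.
+ */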
+static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
+ u32 len, u32 which, struct qib_ctxtdata *rcd)
+{
+ int i;
+ const int last = start + len - 1;
+ const int lastr = last / BITS_PER_LONG;
+ u32 sleeps = 0;
+ int wait = rcd != NULL;
+ unsigned long flags;
+
+ while (wait) {
+ unsigned long shadow;
+ int cstart, previ = -1;
+
+ /*
+ * When flipping from kernel to user, we can't change
+ * the checking type if the buffer is allocated to the
+ * driver. It's OK in the other direction, because it's
+ * from close, and we have just disarmed all the
+ * buffers. All the kernel-to-kernel changes are also
+ * OK.
+ */
+ for (cstart = start; cstart <= last; cstart++) {
+ i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
+ / BITS_PER_LONG;
+ if (i != previ) {
+ shadow = (unsigned long)
+ le64_to_cpu(dd->pioavailregs_dma[i]);
+ previ = i;
+ }
+ if (test_bit(((2 * cstart) +
+ QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
+ % BITS_PER_LONG, &shadow))
+ break;
+ }
+
+ if (cstart > last)
+ break;
+
+ if (sleeps == QTXSLEEPS)
+ break;
+ /* make sure we see an updated copy next time around */
+ sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ sleeps++;
+ msleep(1);
+ }
+
+ switch (which) {
+ case TXCHK_CHG_TYPE_DIS1:
+ /*
+ * disable checking on a range; used by diags; just
+ * one buffer, but still written generically
+ */
+ for (i = start; i <= last; i++)
+ clear_bit(i, dd->cspec->sendchkenable);
+ break;
+
+ case TXCHK_CHG_TYPE_ENAB1:
+ /*
+ * (re)enable checking on a range; used by diags; just
+ * one buffer, but still written generically; read
+ * scratch to be sure buffer actually triggered, not
+ * just flushed from processor.
+ */
+ qib_read_kreg32(dd, kr_scratch);
+ for (i = start; i <= last; i++)
+ set_bit(i, dd->cspec->sendchkenable);
+ break;
+
+ case TXCHK_CHG_TYPE_KERN:
+ /* usable by kernel */
+ for (i = start; i <= last; i++) {
+ set_bit(i, dd->cspec->sendibchk);
+ clear_bit(i, dd->cspec->sendgrhchk);
+ }
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ /* see if we need to raise avail update threshold */
+ for (i = dd->first_user_ctxt;
+ dd->cspec->updthresh != dd->cspec->updthresh_dflt
+ && i < dd->cfgctxts; i++)
+ if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
+ ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
+ < dd->cspec->updthresh_dflt)
+ break;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ if (i == dd->cfgctxts) {
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ dd->cspec->updthresh = dd->cspec->updthresh_dflt;
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+ dd->sendctrl |= (dd->cspec->updthresh &
+ SYM_RMASK(SendCtrl, AvailUpdThld)) <<
+ SYM_LSB(SendCtrl, AvailUpdThld);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ }
+ break;
+
+ case TXCHK_CHG_TYPE_USER:
+ /* for user process */
+ for (i = start; i <= last; i++) {
+ clear_bit(i, dd->cspec->sendibchk);
+ set_bit(i, dd->cspec->sendgrhchk);
+ }
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
+ / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
+ dd->cspec->updthresh = (rcd->piocnt /
+ rcd->subctxt_cnt) - 1;
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+ dd->sendctrl |= (dd->cspec->updthresh &
+ SYM_RMASK(SendCtrl, AvailUpdThld))
+ << SYM_LSB(SendCtrl, AvailUpdThld);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ } else
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ break;
+
+ default:
+ break;
+ }
+
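+ /*
+ * Push the updated shadow masks out to the chip: sendcheckmask for
+ * the diag enable/disable cases, and the GRH-check and IB-packet
+ * masks for the kernel/user cases.
+ */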
+ for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
+ qib_write_kreg(dd, kr_sendcheckmask + i,
+ dd->cspec->sendchkenable[i]);
+
+ for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
+ qib_write_kreg(dd, kr_sendgrhcheckmask + i,
+ dd->cspec->sendgrhchk[i]);
+ qib_write_kreg(dd, kr_sendibpktmask + i,
+ dd->cspec->sendibchk[i]);
+ }
+
+ /*
+ * Be sure whatever we did was seen by the chip and acted upon,
+ * before we return. Mostly important for which >= 2.
+ */
+ qib_read_kreg32(dd, kr_scratch);
+}
+
+
+/* useful for trigger analyzers, etc. */
+static void writescratch(struct qib_devdata *dd, u32 val)
+{
+ qib_write_kreg(dd, kr_scratch, val);
+}
+
+/* Dummy for now, use chip regs soon */
+static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
+{
+ return -ENXIO;
+}
+
+/**
+ * qib_init_iba7322_funcs - set up the chip-specific function pointers
+ * @pdev: the pci_dev for the qlogic_ib device
+ * @ent: pci_device_id struct for this dev
+ *
+ * Also allocates, inits, and returns the devdata struct for this
+ * device instance
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ */
+struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct qib_devdata *dd;
+ int ret, i;
+ u32 tabsize, actual_cnt = 0;
+
+ dd = qib_alloc_devdata(pdev,
+ NUM_IB_PORTS * sizeof(struct qib_pportdata) +
+ sizeof(struct qib_chip_specific) +
+ NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
+ if (IS_ERR(dd))
+ goto bail;
+
+ dd->f_bringup_serdes = qib_7322_bringup_serdes;
+ dd->f_cleanup = qib_setup_7322_cleanup;
+ dd->f_clear_tids = qib_7322_clear_tids;
+ dd->f_free_irq = qib_7322_free_irq;
+ dd->f_get_base_info = qib_7322_get_base_info;
+ dd->f_get_msgheader = qib_7322_get_msgheader;
+ dd->f_getsendbuf = qib_7322_getsendbuf;
+ dd->f_gpio_mod = gpio_7322_mod;
+ dd->f_eeprom_wen = qib_7322_eeprom_wen;
+ dd->f_hdrqempty = qib_7322_hdrqempty;
+ dd->f_ib_updown = qib_7322_ib_updown;
+ dd->f_init_ctxt = qib_7322_init_ctxt;
+ dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
+ dd->f_intr_fallback = qib_7322_intr_fallback;
+ dd->f_late_initreg = qib_late_7322_initreg;
+ dd->f_setpbc_control = qib_7322_setpbc_control;
+ dd->f_portcntr = qib_portcntr_7322;
+ dd->f_put_tid = qib_7322_put_tid;
+ dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
+ dd->f_rcvctrl = rcvctrl_7322_mod;
+ dd->f_read_cntrs = qib_read_7322cntrs;
+ dd->f_read_portcntrs = qib_read_7322portcntrs;
+ dd->f_reset = qib_do_7322_reset;
+ dd->f_init_sdma_regs = init_sdma_7322_regs;
+ dd->f_sdma_busy = qib_sdma_7322_busy;
+ dd->f_sdma_gethead = qib_sdma_7322_gethead;
+ dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
+ dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
+ dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
+ dd->f_sendctrl = sendctrl_7322_mod;
+ dd->f_set_armlaunch = qib_set_7322_armlaunch;
+ dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
+ dd->f_iblink_state = qib_7322_iblink_state;
+ dd->f_ibphys_portstate = qib_7322_phys_portstate;
+ dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
+ dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
+ dd->f_set_ib_loopback = qib_7322_set_loopback;
+ dd->f_get_ib_table = qib_7322_get_ib_table;
+ dd->f_set_ib_table = qib_7322_set_ib_table;
+ dd->f_set_intr_state = qib_7322_set_intr_state;
+ dd->f_setextled = qib_setup_7322_setextled;
+ dd->f_txchk_change = qib_7322_txchk_change;
+ dd->f_update_usrhead = qib_update_7322_usrhead;
+ dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
+ dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
+ dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
+ dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
+ dd->f_sdma_init_early = qib_7322_sdma_init_early;
+ dd->f_writescratch = writescratch;
+ dd->f_tempsense_rd = qib_7322_tempsense_rd;
+ /*
+ * Do remaining PCIe setup and save PCIe values in dd.
+ * Any error printing is already done by the init code.
+ * On return, we have the chip mapped, but chip registers
+ * are not set up until start of qib_init_7322_variables.
+ */
+ ret = qib_pcie_ddinit(dd, pdev, ent);
+ if (ret < 0)
+ goto bail_free;
+
+ /* initialize chip-specific variables */
+ ret = qib_init_7322_variables(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ if (qib_mini_init || !dd->num_pports)
+ goto bail;
+
+ /*
+ * Determine number of vectors we want; depends on port count
+ * and number of configured kernel receive queues actually used.
+ * Should also depend on whether sdma is enabled or not, but
+ * that's such a rare testing case it's not worth worrying about.
+ */
+ tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
+ for (i = 0; i < tabsize; i++)
+ if ((i < ARRAY_SIZE(irq_table) &&
+ irq_table[i].port <= dd->num_pports) ||
+ (i >= ARRAY_SIZE(irq_table) &&
+ dd->rcd[i - ARRAY_SIZE(irq_table)]))
+ actual_cnt++;
+ tabsize = actual_cnt;
+ dd->cspec->msix_entries = kmalloc(tabsize *
+ sizeof(struct msix_entry), GFP_KERNEL);
+ dd->cspec->msix_arg = kmalloc(tabsize *
+ sizeof(void *), GFP_KERNEL);
+ if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) {
+ qib_dev_err(dd, "No memory for MSIx table\n");
+ tabsize = 0;
+ }
+ for (i = 0; i < tabsize; i++)
+ dd->cspec->msix_entries[i].entry = i;
+
+ if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
+ qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
+ "continuing anyway\n");
+ /* may be less than we wanted, if not enough available */
+ dd->cspec->num_msix_entries = tabsize;
+
+ /* setup interrupt handler */
+ qib_setup_7322_interrupt(dd, 1);
+
+ /* clear diagctrl register, in case diags were running and crashed */
+ qib_write_kreg(dd, kr_hwdiagctrl, 0);
+
+ goto bail;
+
+bail_cleanup:
+ qib_pcie_ddcleanup(dd);
+bail_free:
+ qib_free_devdata(dd);
+ dd = ERR_PTR(ret);
+bail:
+ return dd;
+}
+
+/*
+ * Set the table entry at the specified index from the specified table.
+ * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
+ * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
+ * 'ridx' below addresses the correct register slot, while its 4 LSBs
+ * select the corresponding entry (one of TXDDS_TABLE_SZ) from the
+ * selected table.
+ */
+#define DDS_ENT_AMP_LSB 14
+#define DDS_ENT_MAIN_LSB 9
+#define DDS_ENT_POST_LSB 5
+#define DDS_ENT_PRE_XTRA_LSB 3
+#define DDS_ENT_PRE_LSB 0
+
+/*
+ * Set one entry in the TxDDS table for the spec'd port.
+ * ridx picks one of the entries, while tp points
+ * to the appropriate table entry.
+ */
+static void set_txdds(struct qib_pportdata *ppd, int ridx,
+ const struct txdds_ent *tp)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 pack_ent;
+ int regidx;
+
+ /* Get correct offset in chip-space, and in source table */
+ regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
+ /*
+ * We do not use qib_write_kreg_port() because it was intended
+ * only for registers in the lower "port specific" pages.
+ * So do index calculation by hand.
+ */
+ if (ppd->hw_pidx)
+ regidx += (dd->palign / sizeof(u64));
+
+ pack_ent = tp->amp << DDS_ENT_AMP_LSB;
+ pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
+ pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
+ pack_ent |= tp->post << DDS_ENT_POST_LSB;
+ qib_write_kreg(dd, regidx, pack_ent);
+ /* Prevent back-to-back writes by hitting scratch */
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+}
+
+static const struct vendor_txdds_ent vendor_txdds[] = {
+ { /* Amphenol 1m 30awg NoEq */
+ { 0x41, 0x50, 0x48 }, "584470002 ",
+ { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
+ },
+ { /* Amphenol 3m 28awg NoEq */
+ { 0x41, 0x50, 0x48 }, "584470004 ",
+ { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
+ },
+ { /* Finisar 3m OM2 Optical */
+ { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
+ { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
+ },
+ { /* Finisar 30m OM2 Optical */
+ { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
+ { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
+ },
+ { /* Finisar Default OM2 Optical */
+ { 0x00, 0x90, 0x65 }, NULL,
+ { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
+ },
+ { /* Gore 1m 30awg NoEq */
+ { 0x00, 0x21, 0x77 }, "QSN3300-1 ",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
+ },
+ { /* Gore 2m 30awg NoEq */
+ { 0x00, 0x21, 0x77 }, "QSN3300-2 ",
+ { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
+ },
+ { /* Gore 1m 28awg NoEq */
+ { 0x00, 0x21, 0x77 }, "QSN3800-1 ",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
+ },
+ { /* Gore 3m 28awg NoEq */
+ { 0x00, 0x21, 0x77 }, "QSN3800-3 ",
+ { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
+ },
+ { /* Gore 5m 24awg Eq */
+ { 0x00, 0x21, 0x77 }, "QSN7000-5 ",
+ { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
+ },
+ { /* Gore 7m 24awg Eq */
+ { 0x00, 0x21, 0x77 }, "QSN7000-7 ",
+ { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
+ },
+ { /* Gore 5m 26awg Eq */
+ { 0x00, 0x21, 0x77 }, "QSN7600-5 ",
+ { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
+ },
+ { /* Gore 7m 26awg Eq */
+ { 0x00, 0x21, 0x77 }, "QSN7600-7 ",
+ { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
+ },
+ { /* Intersil 12m 24awg Active */
+ { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
+ { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
+ },
+ { /* Intersil 10m 28awg Active */
+ { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
+ },
+ { /* Intersil 7m 30awg Active */
+ { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
+ },
+ { /* Intersil 5m 32awg Active */
+ { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
+ },
+ { /* Intersil Default Active */
+ { 0x00, 0x30, 0xB4 }, NULL,
+ { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
+ },
+ { /* Luxtera 20m Active Optical */
+ { 0x00, 0x25, 0x63 }, NULL,
+ { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
+ },
+ { /* Molex 1M Cu loopback */
+ { 0x00, 0x09, 0x3A }, "74763-0025 ",
+ { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
+ },
+ { /* Molex 2m 28awg NoEq */
+ { 0x00, 0x09, 0x3A }, "74757-2201 ",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
+ },
+};
+
+static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
+ /* amp, pre, main, post */
+ { 2, 2, 15, 6 }, /* Loopback */
+ { 0, 0, 0, 1 }, /* 2 dB */
+ { 0, 0, 0, 2 }, /* 3 dB */
+ { 0, 0, 0, 3 }, /* 4 dB */
+ { 0, 0, 0, 4 }, /* 5 dB */
+ { 0, 0, 0, 5 }, /* 6 dB */
+ { 0, 0, 0, 6 }, /* 7 dB */
+ { 0, 0, 0, 7 }, /* 8 dB */
+ { 0, 0, 0, 8 }, /* 9 dB */
+ { 0, 0, 0, 9 }, /* 10 dB */
+ { 0, 0, 0, 10 }, /* 11 dB */
+ { 0, 0, 0, 11 }, /* 12 dB */
+ { 0, 0, 0, 12 }, /* 13 dB */
+ { 0, 0, 0, 13 }, /* 14 dB */
+ { 0, 0, 0, 14 }, /* 15 dB */
+ { 0, 0, 0, 15 }, /* 16 dB */
+};
+
+static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
+ /* amp, pre, main, post */
+ { 2, 2, 15, 6 }, /* Loopback */
+ { 0, 0, 0, 8 }, /* 2 dB */
+ { 0, 0, 0, 8 }, /* 3 dB */
+ { 0, 0, 0, 9 }, /* 4 dB */
+ { 0, 0, 0, 9 }, /* 5 dB */
+ { 0, 0, 0, 10 }, /* 6 dB */
+ { 0, 0, 0, 10 }, /* 7 dB */
+ { 0, 0, 0, 11 }, /* 8 dB */
+ { 0, 0, 0, 11 }, /* 9 dB */
+ { 0, 0, 0, 12 }, /* 10 dB */
+ { 0, 0, 0, 12 }, /* 11 dB */
+ { 0, 0, 0, 13 }, /* 12 dB */
+ { 0, 0, 0, 13 }, /* 13 dB */
+ { 0, 0, 0, 14 }, /* 14 dB */
+ { 0, 0, 0, 14 }, /* 15 dB */
+ { 0, 0, 0, 15 }, /* 16 dB */
+};
+
+static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
+ /* amp, pre, main, post */
+ { 2, 2, 15, 6 }, /* Loopback */
+ { 0, 1, 0, 7 }, /* 2 dB (also QMH7342) */
+ { 0, 1, 0, 9 }, /* 3 dB (also QMH7342) */
+ { 0, 1, 0, 11 }, /* 4 dB */
+ { 0, 1, 0, 13 }, /* 5 dB */
+ { 0, 1, 0, 15 }, /* 6 dB */
+ { 0, 1, 3, 15 }, /* 7 dB */
+ { 0, 1, 7, 15 }, /* 8 dB */
+ { 0, 1, 7, 15 }, /* 9 dB */
+ { 0, 1, 8, 15 }, /* 10 dB */
+ { 0, 1, 9, 15 }, /* 11 dB */
+ { 0, 1, 10, 15 }, /* 12 dB */
+ { 0, 2, 6, 15 }, /* 13 dB */
+ { 0, 2, 7, 15 }, /* 14 dB */
+ { 0, 2, 8, 15 }, /* 15 dB */
+ { 0, 2, 9, 15 }, /* 16 dB */
+};
+
+/*
+ * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
+ * These are mostly used for mez cards going through connectors
+ * and backplane traces, but can be used to add other "unusual"
+ * table values as well.
+ */
+static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
+ /* amp, pre, main, post */
+ { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+};
+
+static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
+ /* amp, pre, main, post */
+ { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+};
+
+static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
+ /* amp, pre, main, post */
+ { 0, 1, 0, 4 }, /* QMH7342 backplane settings */
+ { 0, 1, 0, 5 }, /* QMH7342 backplane settings */
+ { 0, 1, 0, 6 }, /* QMH7342 backplane settings */
+ { 0, 1, 0, 8 }, /* QMH7342 backplane settings */
+ { 0, 1, 12, 10 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 11 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 12 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 14 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 6 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 7 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 8 }, /* QME7342 backplane setting */
+};
+
+static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
+ unsigned atten)
+{
+ /*
+ * The attenuation table starts at 2dB for entry 1,
+ * with entry 0 being the loopback entry.
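+ * E.g. 2dB or less selects entry 1, 10dB selects entry 9, and values
+ * beyond the end of the table clamp to the last entry.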
+ */
+ if (atten <= 2)
+ atten = 1;
+ else if (atten > TXDDS_TABLE_SZ)
+ atten = TXDDS_TABLE_SZ - 1;
+ else
+ atten--;
+ return txdds + atten;
+}
+
+/*
+ * if override is set, the module parameter txselect has a value
+ * for this specific port, so use it, rather than our normal mechanism.
+ */
+static void find_best_ent(struct qib_pportdata *ppd,
+ const struct txdds_ent **sdr_dds,
+ const struct txdds_ent **ddr_dds,
+ const struct txdds_ent **qdr_dds, int override)
+{
+ struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
+ int idx;
+
+ /* Search table of known cables */
+ for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
+ const struct vendor_txdds_ent *v = vendor_txdds + idx;
+
+ if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
+ (!v->partnum ||
+ !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
+ *sdr_dds = &v->sdr;
+ *ddr_dds = &v->ddr;
+ *qdr_dds = &v->qdr;
+ return;
+ }
+ }
+
+ /* Lookup serdes setting by cable type and attenuation */
+ if (!override && QSFP_IS_ACTIVE(qd->tech)) {
+ *sdr_dds = txdds_sdr + ppd->dd->board_atten;
+ *ddr_dds = txdds_ddr + ppd->dd->board_atten;
+ *qdr_dds = txdds_qdr + ppd->dd->board_atten;
+ return;
+ }
+
+ if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
+ qd->atten[1])) {
+ *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
+ *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
+ *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
+ return;
+ } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
+ /*
+ * If we have no (or incomplete) data from the cable
+ * EEPROM, or no QSFP, or override is set, use the
+ * module parameter value to index into the attenuation
+ * table.
+ */
+ idx = ppd->cpspec->no_eep;
+ *sdr_dds = &txdds_sdr[idx];
+ *ddr_dds = &txdds_ddr[idx];
+ *qdr_dds = &txdds_qdr[idx];
+ } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
+ /* similar to above, but index into the "extra" table. */
+ idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
+ *sdr_dds = &txdds_extra_sdr[idx];
+ *ddr_dds = &txdds_extra_ddr[idx];
+ *qdr_dds = &txdds_extra_qdr[idx];
+ } else {
+ /* this shouldn't happen, it's range checked */
+ *sdr_dds = txdds_sdr + qib_long_atten;
+ *ddr_dds = txdds_ddr + qib_long_atten;
+ *qdr_dds = txdds_qdr + qib_long_atten;
+ }
+}
+
+static void init_txdds_table(struct qib_pportdata *ppd, int override)
+{
+ const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
+ struct txdds_ent *dds;
+ int idx;
+ int single_ent = 0;
+
+ find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
+
+ /* for mez cards or override, use the selected value for all entries */
+ if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
+ single_ent = 1;
+
+ /* Fill in the first entry with the best entry found. */
+ set_txdds(ppd, 0, sdr_dds);
+ set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
+ set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
+ if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
+ QIBL_LINKACTIVE)) {
+ dds = (struct txdds_ent *)(ppd->link_speed_active ==
+ QIB_IB_QDR ? qdr_dds :
+ (ppd->link_speed_active ==
+ QIB_IB_DDR ? ddr_dds : sdr_dds));
+ write_tx_serdes_param(ppd, dds);
+ }
+
+ /* Fill in the remaining entries with the default table values. */
+ for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
+ set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
+ set_txdds(ppd, idx + TXDDS_TABLE_SZ,
+ single_ent ? ddr_dds : txdds_ddr + idx);
+ set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
+ single_ent ? qdr_dds : txdds_qdr + idx);
+ }
+}
+
+#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
+#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
+#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
+#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
+#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
+#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
+#define AHB_TRANS_TRIES 10
+
+/*
+ * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
+ * 5=subsystem, which is why most calls have "(chan + (chan >> 1))"
+ * for the channel argument.
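+ *
+ * ahb_mod() is a read-modify-write helper: it grabs AHB access for
+ * the given quad, reads the current value when the mask does not
+ * cover the whole register, merges in the new bits, writes the
+ * result, and restores the previous access setting.  A mask of zero
+ * makes it a pure read; 0xBAD0BAD is returned on failure.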
+ */
+static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
+ u32 data, u32 mask)
+{
+ u32 rd_data, wr_data, sz_mask;
+ u64 trans, acc, prev_acc;
+ u32 ret = 0xBAD0BAD;
+ int tries;
+
+ prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
+ /* From this point on, make sure we return access */
+ acc = (quad << 1) | 1;
+ qib_write_kreg(dd, KR_AHB_ACC, acc);
+
+ for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
+ trans = qib_read_kreg64(dd, KR_AHB_TRANS);
+ if (trans & AHB_TRANS_RDY)
+ break;
+ }
+ if (tries >= AHB_TRANS_TRIES) {
+ qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
+ goto bail;
+ }
+
+ /* If mask is not all 1s, we need to read, but different SerDes
+ * entities have different sizes
+ */
+ sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
+ wr_data = data & mask & sz_mask;
+ if ((~mask & sz_mask) != 0) {
+ trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
+ qib_write_kreg(dd, KR_AHB_TRANS, trans);
+
+ for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
+ trans = qib_read_kreg64(dd, KR_AHB_TRANS);
+ if (trans & AHB_TRANS_RDY)
+ break;
+ }
+ if (tries >= AHB_TRANS_TRIES) {
+ qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
+ AHB_TRANS_TRIES);
+ goto bail;
+ }
+ /* Re-read in case host split reads and read data first */
+ trans = qib_read_kreg64(dd, KR_AHB_TRANS);
+ rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
+ wr_data |= (rd_data & ~mask & sz_mask);
+ }
+
+ /* If mask is not zero, we need to write. */
+ if (mask & sz_mask) {
+ trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
+ trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
+ trans |= AHB_WR;
+ qib_write_kreg(dd, KR_AHB_TRANS, trans);
+
+ for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
+ trans = qib_read_kreg64(dd, KR_AHB_TRANS);
+ if (trans & AHB_TRANS_RDY)
+ break;
+ }
+ if (tries >= AHB_TRANS_TRIES) {
+ qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
+ AHB_TRANS_TRIES);
+ goto bail;
+ }
+ }
+ ret = wr_data;
+bail:
+ qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
+ return ret;
+}
+
+static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
+ unsigned mask)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int chan;
+ u32 rbc;
+
+ for (chan = 0; chan < SERDES_CHANS; ++chan) {
+ ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
+ data, mask);
+ rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ addr, 0, 0);
+ }
+}
+
+static int serdes_7322_init(struct qib_pportdata *ppd)
+{
+ u64 data;
+ u32 le_val;
+
+ /*
+ * Initialize the Tx DDS tables. Also done every QSFP event,
+ * for adapters with QSFP
+ */
+ init_txdds_table(ppd, 0);
+
+ /* ensure no tx overrides from earlier driver loads */
+ qib_write_kreg_port(ppd, krp_tx_deemph_override,
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ reset_tx_deemphasis_override));
+
+ /* Patch some SerDes defaults to "Better for IB" */
+ /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
+ ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
+
+ /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
+ ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
+ /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
+ ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
+
+ /* May be overridden in qsfp_7322_event */
+ le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
+ ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
+
+ /* enable LE1 adaptation for all but QME, which is disabled */
+ le_val = IS_QME(ppd->dd) ? 0 : 1;
+ ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
+
+ /* Clear cmode-override, may be set from older driver */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
+
+ /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
+ ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
+
+ /* setup LoS params; these are subsystem, so chan == 5 */
+ /* LoS filter threshold_count on, ch 0-3, set to 8 */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
+
+ /* LoS filter threshold_count off, ch 0-3, set to 4 */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
+
+ /* LoS filter select enabled */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
+
+ /* LoS target data: SDR=4, DDR=2, QDR=1 */
+ ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
+ ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
+ ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
+
+ data = qib_read_kreg_port(ppd, krp_serdesctrl);
+ qib_write_kreg_port(ppd, krp_serdesctrl, data |
+ SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
+
+ /* rxbistena; set to 0 to avoid effects of it switching later */
+ ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
+
+ /* Configure 4 DFE taps, and only they adapt */
+ ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
+
+ /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
+ le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
+ ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
+
+ /*
+ * Set receive adaptation mode. SDR and DDR adaptation are
+ * always on, and QDR is initially enabled; later disabled.
+ */
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+ ppd->dd->cspec->r1 ?
+ QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
+ ppd->cpspec->qdr_dfe_on = 1;
+
+ /* FLoop LOS gate: PPM filter enabled */
+ ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
+
+ /* rx offset center enabled */
+ ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
+
+ if (!ppd->dd->cspec->r1) {
+ ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
+ ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
+ }
+
+ /* Set the frequency loop bandwidth to 15 */
+ ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
+
+ return 0;
+}
+
+/* start adjust QMH serdes parameters */
+
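+ /*
+ * The following helpers let force_h1() manually load an H1 value into
+ * the SerDes: enable manual mode, write the code, then toggle the
+ * manual clock to latch it.
+ */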
+static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
+{
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 9, code << 9, 0x3f << 9);
+}
+
+static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
+ int enable, u32 tapenable)
+{
+ if (enable)
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 1, 3 << 10, 0x1f << 10);
+ else
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 1, 0, 0x1f << 10);
+}
+
+/* Set clock to 1, 0, 1, 0 */
+static void clock_man(struct qib_pportdata *ppd, int chan)
+{
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 4, 0x4000, 0x4000);
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 4, 0, 0x4000);
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 4, 0x4000, 0x4000);
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 4, 0, 0x4000);
+}
+
+/*
+ * write the current Tx serdes pre,post,main,amp settings into the serdes.
+ * The caller must pass the settings appropriate for the current speed,
+ * or not care if they are correct for the current speed.
+ */
+static void write_tx_serdes_param(struct qib_pportdata *ppd,
+ struct txdds_ent *txdds)
+{
+ u64 deemph;
+
+ deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
+ /* field names for amp, main, post, pre, respectively */
+ deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
+
+ deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ tx_override_deemphasis_select);
+ deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txampcntl_d2a);
+ deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txc0_ena);
+ deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txcp1_ena);
+ deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txcn1_ena);
+ qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
+}
+
+/*
+ * Set the parameters for mez cards on link bounce, so they are
+ * always exactly what was requested. Similar logic to init_txdds
+ * but does just the serdes.
+ */
+static void adj_tx_serdes(struct qib_pportdata *ppd)
+{
+ const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
+ struct txdds_ent *dds;
+
+ find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
+ dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
+ qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
+ ddr_dds : sdr_dds));
+ write_tx_serdes_param(ppd, dds);
+}
+
+/* set QDR forced value for H1, if needed */
+static void force_h1(struct qib_pportdata *ppd)
+{
+ int chan;
+
+ ppd->cpspec->qdr_reforce = 0;
+ if (!ppd->dd->cspec->r1)
+ return;
+
+ for (chan = 0; chan < SERDES_CHANS; chan++) {
+ set_man_mode_h1(ppd, chan, 1, 0);
+ set_man_code(ppd, chan, ppd->cpspec->h1_val);
+ clock_man(ppd, chan);
+ set_man_mode_h1(ppd, chan, 0, 0);
+ }
+}
+
+#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
+#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
+
+#define R_OPCODE_LSB 3
+#define R_OP_NOP 0
+#define R_OP_SHIFT 2
+#define R_OP_UPDATE 3
+#define R_TDI_LSB 2
+#define R_TDO_LSB 1
+#define R_RDY 1
+
+static int qib_r_grab(struct qib_devdata *dd)
+{
+ u64 val;
+ val = SJA_EN;
+ qib_write_kreg(dd, kr_r_access, val);
+ qib_read_kreg32(dd, kr_scratch);
+ return 0;
+}
+
+/* qib_r_wait_for_rdy() not only waits for the ready bit, it
+ * returns the current state of R_TDO
+ */
+static int qib_r_wait_for_rdy(struct qib_devdata *dd)
+{
+ u64 val;
+ int timeout;
+ for (timeout = 0; timeout < 100 ; ++timeout) {
+ val = qib_read_kreg32(dd, kr_r_access);
+ if (val & R_RDY)
+ return (val >> R_TDO_LSB) & 1;
+ }
+ return -1;
+}
+
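+ /*
+ * Shift 'len' bits through the selected BIST chain, LSB first within
+ * each byte.  Bits from 'inp' (if non-NULL) drive TDI, and the TDO
+ * state is captured into 'outp' (if non-NULL).  Returns the bit
+ * position reached, or negative if the ready bit never asserted.
+ */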
+static int qib_r_shift(struct qib_devdata *dd, int bisten,
+ int len, u8 *inp, u8 *outp)
+{
+ u64 valbase, val;
+ int ret, pos;
+
+ valbase = SJA_EN | (bisten << BISTEN_LSB) |
+ (R_OP_SHIFT << R_OPCODE_LSB);
+ ret = qib_r_wait_for_rdy(dd);
+ if (ret < 0)
+ goto bail;
+ for (pos = 0; pos < len; ++pos) {
+ val = valbase;
+ if (outp) {
+ outp[pos >> 3] &= ~(1 << (pos & 7));
+ outp[pos >> 3] |= (ret << (pos & 7));
+ }
+ if (inp) {
+ int tdi = inp[pos >> 3] >> (pos & 7);
+ val |= ((tdi & 1) << R_TDI_LSB);
+ }
+ qib_write_kreg(dd, kr_r_access, val);
+ qib_read_kreg32(dd, kr_scratch);
+ ret = qib_r_wait_for_rdy(dd);
+ if (ret < 0)
+ break;
+ }
+ /* Restore to NOP between operations. */
+ val = SJA_EN | (bisten << BISTEN_LSB);
+ qib_write_kreg(dd, kr_r_access, val);
+ qib_read_kreg32(dd, kr_scratch);
+ ret = qib_r_wait_for_rdy(dd);
+
+ if (ret >= 0)
+ ret = pos;
+bail:
+ return ret;
+}
+
+static int qib_r_update(struct qib_devdata *dd, int bisten)
+{
+ u64 val;
+ int ret;
+
+ val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
+ ret = qib_r_wait_for_rdy(dd);
+ if (ret >= 0) {
+ qib_write_kreg(dd, kr_r_access, val);
+ qib_read_kreg32(dd, kr_scratch);
+ }
+ return ret;
+}
+
+#define BISTEN_PORT_SEL 15
+#define LEN_PORT_SEL 625
+#define BISTEN_AT 17
+#define LEN_AT 156
+#define BISTEN_ETM 16
+#define LEN_ETM 632
+
+#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
+
+/* these are common for all IB port use cases. */
+static u8 reset_at[BIT2BYTE(LEN_AT)] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
+};
+static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
+ 0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
+ 0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
+ 0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
+ 0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+};
+static u8 at[BIT2BYTE(LEN_AT)] = {
+ 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
+};
+
+/* used for IB1 or IB2, only one in use */
+static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
+ 0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
+};
+
+/* used when both IB1 and IB2 are in use */
+static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
+ 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
+ 0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
+ 0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
+};
+
+/* used when only IB1 is in use */
+static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
+ 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
+ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
+ 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
+ 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
+ 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+/* used when only IB2 is in use */
+static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
+ 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
+ 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
+ 0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
+ 0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
+ 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
+ 0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
+ 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
+ 0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
+};
+
+/* used when both IB1 and IB2 are in use */
+static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
+ 0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
+ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
+ 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
+ 0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
+ 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+/*
+ * Do setup to properly handle IB link recovery; if port is zero, we
+ * are initializing to cover both ports; otherwise we are initializing
+ * to cover a single port card, or the port has reached INIT and we may
+ * need to switch coverage types.
+ */
+static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
+{
+ u8 *portsel, *etm;
+ struct qib_devdata *dd = ppd->dd;
+
+ if (!ppd->dd->cspec->r1)
+ return;
+ if (!both) {
+ dd->cspec->recovery_ports_initted++;
+ ppd->cpspec->recovery_init = 1;
+ }
+ if (!both && dd->cspec->recovery_ports_initted == 1) {
+ portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
+ etm = atetm_1port;
+ } else {
+ portsel = portsel_2port;
+ etm = atetm_2port;
+ }
+
+ if (qib_r_grab(dd) < 0 ||
+ qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
+ qib_r_update(dd, BISTEN_ETM) < 0 ||
+ qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
+ qib_r_update(dd, BISTEN_AT) < 0 ||
+ qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
+ portsel, NULL) < 0 ||
+ qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
+ qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
+ qib_r_update(dd, BISTEN_AT) < 0 ||
+ qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
+ qib_r_update(dd, BISTEN_ETM) < 0)
+ qib_dev_err(dd, "Failed IB link recovery setup\n");
+}
+
+static void check_7322_rxe_status(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 fmask;
+
+ if (dd->cspec->recovery_ports_initted != 1)
+ return; /* rest doesn't apply to dualport */
+ qib_write_kreg(dd, kr_control, dd->control |
+ SYM_MASK(Control, FreezeMode));
+ (void)qib_read_kreg64(dd, kr_scratch);
+ udelay(3); /* ibcreset asserted 400ns, be sure that's over */
+ fmask = qib_read_kreg64(dd, kr_act_fmask);
+ if (!fmask) {
+ /*
+ * require a powercycle before we'll work again, and make
+ * sure we get no more interrupts, and don't turn off
+ * freeze.
+ */
+ ppd->dd->cspec->stay_in_freeze = 1;
+ qib_7322_set_intr_state(ppd->dd, 0);
+ qib_write_kreg(dd, kr_fmask, 0ULL);
+ qib_dev_err(dd, "HCA unusable until powercycled\n");
+ return; /* eventually reset */
+ }
+
+ qib_write_kreg(ppd->dd, kr_hwerrclear,
+ SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
+
+ /* don't do the full clear_freeze(), not needed for this */
+ qib_write_kreg(dd, kr_control, dd->control);
+ qib_read_kreg32(dd, kr_scratch);
+ /* take IBC out of reset */
+ if (ppd->link_speed_supported) {
+ ppd->cpspec->ibcctrl_a &=
+ ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ qib_read_kreg32(dd, kr_scratch);
+ if (ppd->lflags & QIBL_IB_LINK_DISABLED)
+ qib_set_ib_7322_lstate(ppd, 0,
+ QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+ }
+}
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
new file mode 100644
index 000000000000..9b40f345ac3f
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -0,0 +1,1586 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+/*
+ * min buffers we want to have per context, after driver
+ */
+#define QIB_MIN_USER_CTXT_BUFCNT 7
+
+#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
+#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
+#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)
+
+/*
+ * Number of ctxts we are configured to use (to allow for more pio
+ * buffers per ctxt, etc.) Zero means use chip value.
+ */
+ushort qib_cfgctxts;
+module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
+MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");
+
+/*
+ * If set, do not write to any regs if avoidable, hack to allow
+ * check for deranged default register values.
+ */
+ushort qib_mini_init;
+module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
+MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");
+
+unsigned qib_n_krcv_queues;
+module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
+MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
+
+/*
+ * qib_wc_pat parameter:
+ * 0 is WC via MTRR
+ * 1 is WC via PAT
+ * If PAT initialization fails, code reverts back to MTRR
+ */
+unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
+module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
+MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
+
+struct workqueue_struct *qib_wq;
+struct workqueue_struct *qib_cq_wq;
+
+static void verify_interrupt(unsigned long);
+
+static struct idr qib_unit_table;
+u32 qib_cpulist_count;
+unsigned long *qib_cpulist;
+
+/* set number of contexts we'll actually use */
+void qib_set_ctxtcnt(struct qib_devdata *dd)
+{
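+ /*
+ * Honor the cfgctxts module parameter only when it is sane: non-zero,
+ * at least one context per port, and no more than the chip supports;
+ * otherwise fall back to the chip's context count.
+ */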
+ if (!qib_cfgctxts)
+ dd->cfgctxts = dd->ctxtcnt;
+ else if (qib_cfgctxts < dd->num_pports)
+ dd->cfgctxts = dd->ctxtcnt;
+ else if (qib_cfgctxts <= dd->ctxtcnt)
+ dd->cfgctxts = qib_cfgctxts;
+ else
+ dd->cfgctxts = dd->ctxtcnt;
+}
+
+/*
+ * Common code for creating the receive context array.
+ */
+int qib_create_ctxts(struct qib_devdata *dd)
+{
+ unsigned i;
+ int ret;
+
+ /*
+ * Allocate full ctxtcnt array, rather than just cfgctxts, because
+ * cleanup iterates across all possible ctxts.
+ */
+ dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
+ if (!dd->rcd) {
+ qib_dev_err(dd, "Unable to allocate ctxtdata array, "
+ "failing\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /* create (one or more) kctxt */
+ for (i = 0; i < dd->first_user_ctxt; ++i) {
+ struct qib_pportdata *ppd;
+ struct qib_ctxtdata *rcd;
+
+ if (dd->skip_kctxt_mask & (1 << i))
+ continue;
+
+ ppd = dd->pport + (i % dd->num_pports);
+ rcd = qib_create_ctxtdata(ppd, i);
+ if (!rcd) {
+ qib_dev_err(dd, "Unable to allocate ctxtdata"
+ " for Kernel ctxt, failing\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+ rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
+ rcd->seq_cnt = 1;
+ }
+ ret = 0;
+done:
+ return ret;
+}
+
+/*
+ * Common code for user and kernel context setup.
+ */
+struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt)
+{
+ struct qib_devdata *dd = ppd->dd;
+ struct qib_ctxtdata *rcd;
+
+ rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
+ if (rcd) {
+ INIT_LIST_HEAD(&rcd->qp_wait_list);
+ rcd->ppd = ppd;
+ rcd->dd = dd;
+ rcd->cnt = 1;
+ rcd->ctxt = ctxt;
+ dd->rcd[ctxt] = rcd;
+
+ dd->f_init_ctxt(rcd);
+
+ /*
+ * To avoid wasting a lot of memory, we allocate 32KB chunks
+ * of physically contiguous memory, advance through it until
+ * used up and then allocate more. Of course, we need
+ * memory to store those extra pointers, now. 32KB seems to
+ * be the most that is "safe" under memory pressure
+ * (creating large files and then copying them over
+ * NFS while doing lots of MPI jobs). The OOM killer can
+ * get invoked, even though we say we can sleep and this can
+ * cause significant system problems....
+ */
+ rcd->rcvegrbuf_size = 0x8000;
+ rcd->rcvegrbufs_perchunk =
+ rcd->rcvegrbuf_size / dd->rcvegrbufsize;
+ rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
+ rcd->rcvegrbufs_perchunk - 1) /
+ rcd->rcvegrbufs_perchunk;
+ }
+ return rcd;
+}
+
+/*
+ * Common code for initializing the physical port structure.
+ */
+void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
+ u8 hw_pidx, u8 port)
+{
+ ppd->dd = dd;
+ ppd->hw_pidx = hw_pidx;
+ ppd->port = port; /* IB port number, not index */
+
+ spin_lock_init(&ppd->sdma_lock);
+ spin_lock_init(&ppd->lflags_lock);
+ init_waitqueue_head(&ppd->state_wait);
+
+ init_timer(&ppd->symerr_clear_timer);
+ ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup;
+ ppd->symerr_clear_timer.data = (unsigned long)ppd;
+}
+
+static int init_pioavailregs(struct qib_devdata *dd)
+{
+ int ret, pidx;
+ u64 *status_page;
+
+ dd->pioavailregs_dma = dma_alloc_coherent(
+ &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
+ GFP_KERNEL);
+ if (!dd->pioavailregs_dma) {
+ qib_dev_err(dd, "failed to allocate PIOavail reg area "
+ "in memory\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /*
+ * We really want L2 cache aligned, but for current CPUs of
+ * interest, they are the same.
+ */
+ status_page = (u64 *)
+ ((char *) dd->pioavailregs_dma +
+ ((2 * L1_CACHE_BYTES +
+ dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
+ /* device status comes first, for backwards compatibility */
+ dd->devstatusp = status_page;
+ *status_page++ = 0;
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ dd->pport[pidx].statusp = status_page;
+ *status_page++ = 0;
+ }
+
+ /*
+ * Setup buffer to hold freeze and other messages, accessible to
+ * apps, following statusp. This is per-unit, not per port.
+ */
+ dd->freezemsg = (char *) status_page;
+ *dd->freezemsg = 0;
+ /* length of msg buffer is "whatever is left" */
+ ret = (char *) status_page - (char *) dd->pioavailregs_dma;
+ dd->freezelen = PAGE_SIZE - ret;
+
+ ret = 0;
+
+done:
+ return ret;
+}
+
+/**
+ * init_shadow_tids - allocate the shadow TID array
+ * @dd: the qlogic_ib device
+ *
+ * allocate the shadow TID array, so we can qib_munlock previous
+ * entries. It may make more sense to move the pageshadow to the
+ * ctxt data structure, so we only allocate memory for ctxts actually
+ * in use, since we are at 8k per ctxt now.
+ * We don't want failures here to prevent use of the driver/chip,
+ * so no return value.
+ */
+static void init_shadow_tids(struct qib_devdata *dd)
+{
+ struct page **pages;
+ dma_addr_t *addrs;
+
+ pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+ if (!pages) {
+ qib_dev_err(dd, "failed to allocate shadow page * "
+ "array, no expected sends!\n");
+ goto bail;
+ }
+
+ addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+ if (!addrs) {
+ qib_dev_err(dd, "failed to allocate shadow dma handle "
+ "array, no expected sends!\n");
+ goto bail_free;
+ }
+
+ memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+ memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+
+ dd->pageshadow = pages;
+ dd->physshadow = addrs;
+ return;
+
+bail_free:
+ vfree(pages);
+bail:
+ dd->pageshadow = NULL;
+}
+
+/*
+ * Do initialization for device that is only needed on
+ * first detect, not on resets.
+ */
+static int loadtime_init(struct qib_devdata *dd)
+{
+ int ret = 0;
+
+ if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
+ QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
+ qib_dev_err(dd, "Driver only handles version %d, "
+ "chip swversion is %d (%llx), failng\n",
+ QIB_CHIP_SWVERSION,
+ (int)(dd->revision >>
+ QLOGIC_IB_R_SOFTWARE_SHIFT) &
+ QLOGIC_IB_R_SOFTWARE_MASK,
+ (unsigned long long) dd->revision);
+ ret = -ENOSYS;
+ goto done;
+ }
+
+ if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
+ qib_devinfo(dd->pcidev, "%s", dd->boardversion);
+
+ spin_lock_init(&dd->pioavail_lock);
+ spin_lock_init(&dd->sendctrl_lock);
+ spin_lock_init(&dd->uctxt_lock);
+ spin_lock_init(&dd->qib_diag_trans_lock);
+ spin_lock_init(&dd->eep_st_lock);
+ mutex_init(&dd->eep_lock);
+
+ if (qib_mini_init)
+ goto done;
+
+ ret = init_pioavailregs(dd);
+ init_shadow_tids(dd);
+
+ qib_get_eeprom_info(dd);
+
+ /* setup time (don't start yet) to verify we got interrupt */
+ init_timer(&dd->intrchk_timer);
+ dd->intrchk_timer.function = verify_interrupt;
+ dd->intrchk_timer.data = (unsigned long) dd;
+
+done:
+ return ret;
+}
+
+/**
+ * init_after_reset - re-initialize after a reset
+ * @dd: the qlogic_ib device
+ *
+ * sanity check at least some of the values after reset, and
+ * ensure no receive or transmit (explicitly, in case reset
+ * failed)
+ */
+static int init_after_reset(struct qib_devdata *dd)
+{
+ int i;
+
+ /*
+ * Ensure chip does no sends or receives, tail updates, or
+ * pioavail updates while we re-initialize. This is mostly
+ * for the driver data structures, not chip registers.
+ */
+ for (i = 0; i < dd->num_pports; ++i) {
+ /*
+ * ctxt == -1 means "all contexts". Only really safe for
+ * _dis_abling things, as here.
+ */
+ dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
+ QIB_RCVCTRL_INTRAVAIL_DIS |
+ QIB_RCVCTRL_TAILUPD_DIS, -1);
+ /* Redundant across ports for some, but no big deal. */
+ dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
+ QIB_SENDCTRL_AVAIL_DIS);
+ }
+
+ return 0;
+}
+
+static void enable_chip(struct qib_devdata *dd)
+{
+ u64 rcvmask;
+ int i;
+
+ /*
+ * Enable PIO send, and update of PIOavail regs to memory.
+ */
+ for (i = 0; i < dd->num_pports; ++i)
+ dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
+ QIB_SENDCTRL_AVAIL_ENB);
+ /*
+ * Enable kernel ctxts' receive and receive interrupt.
+ * Other ctxts done as user opens and inits them.
+ */
+ rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
+ rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
+ QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
+ for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
+ struct qib_ctxtdata *rcd = dd->rcd[i];
+
+ if (rcd)
+ dd->f_rcvctrl(rcd->ppd, rcvmask, i);
+ }
+}
+
+static void verify_interrupt(unsigned long opaque)
+{
+ struct qib_devdata *dd = (struct qib_devdata *) opaque;
+
+ if (!dd)
+ return; /* being torn down */
+
+ /*
+ * If we don't have a lid or any interrupts, let the user know and
+ * don't bother checking again.
+ */
+ if (dd->int_counter == 0) {
+ if (!dd->f_intr_fallback(dd))
+ dev_err(&dd->pcidev->dev, "No interrupts detected, "
+ "not usable.\n");
+ else /* re-arm the timer to see if fallback works */
+ mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
+ }
+}
+
+static void init_piobuf_state(struct qib_devdata *dd)
+{
+ int i, pidx;
+ u32 uctxts;
+
+ /*
+ * Ensure all buffers are free, and fifos empty. Buffers
+ * are common, so only do once for port 0.
+ *
+ * After enable and qib_chg_pioavailkernel so we can safely
+ * enable pioavail updates and PIOENABLE. After this, packets
+ * are ready and able to go out.
+ */
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);
+
+ /*
+ * If not all sendbufs are used, add the one to each of the lower
+ * numbered contexts. pbufsctxt and lastctxt_piobuf are
+ * calculated in chip-specific code because it may cause some
+ * chip-specific adjustments to be made.
+ */
+ uctxts = dd->cfgctxts - dd->first_user_ctxt;
+ dd->ctxts_extrabuf = dd->pbufsctxt ?
+ dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;
+
+ /*
+ * Set up the shadow copies of the piobufavail registers,
+ * which we compare against the chip registers for now, and
+ * the in memory DMA'ed copies of the registers.
+ * By now pioavail updates to memory should have occurred, so
+ * copy them into our working/shadow registers; this is in
+ * case something went wrong with abort, but mostly to get the
+ * initial values of the generation bit correct.
+ */
+ for (i = 0; i < dd->pioavregs; i++) {
+ __le64 tmp;
+
+ tmp = dd->pioavailregs_dma[i];
+ /*
+ * Don't need to worry about pioavailkernel here
+ * because we will call qib_chg_pioavailkernel() later
+ * in initialization, to busy out buffers as needed.
+ */
+ dd->pioavailshadow[i] = le64_to_cpu(tmp);
+ }
+ while (i < ARRAY_SIZE(dd->pioavailshadow))
+ dd->pioavailshadow[i++] = 0; /* for debugging sanity */
+
+ /* after pioavailshadow is setup */
+ qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
+ TXCHK_CHG_TYPE_KERN, NULL);
+ dd->f_initvl15_bufs(dd);
+}
+
+/**
+ * qib_init - do the actual initialization sequence on the chip
+ * @dd: the qlogic_ib device
+ * @reinit: reinitializing, so don't allocate new memory
+ *
+ * Do the actual initialization sequence on the chip. This is done
+ * both from the init routine called from the PCI infrastructure, and
+ * when we reset the chip, or detect that it was reset internally,
+ * or it's administratively re-enabled.
+ *
+ * Memory allocation here and in called routines is only done in
+ * the first case (reinit == 0). We have to be careful, because even
+ * without memory allocation, we need to re-write all the chip registers
+ * TIDs, etc. after the reset or enable has completed.
+ */
+int qib_init(struct qib_devdata *dd, int reinit)
+{
+ int ret = 0, pidx, lastfail = 0;
+ u32 portok = 0;
+ unsigned i;
+ struct qib_ctxtdata *rcd;
+ struct qib_pportdata *ppd;
+ unsigned long flags;
+
+ /* Set linkstate to unknown, so we can watch for a transition. */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
+ QIBL_LINKDOWN | QIBL_LINKINIT |
+ QIBL_LINKV);
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
+
+ if (reinit)
+ ret = init_after_reset(dd);
+ else
+ ret = loadtime_init(dd);
+ if (ret)
+ goto done;
+
+ /* Bypass most chip-init, to get to device creation */
+ if (qib_mini_init)
+ return 0;
+
+ ret = dd->f_late_initreg(dd);
+ if (ret)
+ goto done;
+
+ /* dd->rcd can be NULL if early init failed */
+ for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
+ /*
+ * Set up the (kernel) rcvhdr queue and egr TIDs. If doing
+ * re-init, the simplest way to handle this is to free
+ * existing, and re-allocate.
+ * Need to re-create rest of ctxt 0 ctxtdata as well.
+ */
+ rcd = dd->rcd[i];
+ if (!rcd)
+ continue;
+
+ lastfail = qib_create_rcvhdrq(dd, rcd);
+ if (!lastfail)
+ lastfail = qib_setup_eagerbufs(rcd);
+ if (lastfail) {
+ qib_dev_err(dd, "failed to allocate kernel ctxt's "
+ "rcvhdrq and/or egr bufs\n");
+ continue;
+ }
+ }
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ int mtu;
+ if (lastfail)
+ ret = lastfail;
+ ppd = dd->pport + pidx;
+ mtu = ib_mtu_enum_to_int(qib_ibmtu);
+ if (mtu == -1) {
+ mtu = QIB_DEFAULT_MTU;
+ qib_ibmtu = 0; /* don't leave invalid value */
+ }
+ /* set max we can ever have for this driver load */
+ ppd->init_ibmaxlen = min(mtu > 2048 ?
+ dd->piosize4k : dd->piosize2k,
+ dd->rcvegrbufsize +
+ (dd->rcvhdrentsize << 2));
+ /*
+ * Have to initialize ibmaxlen, but this will normally
+ * change immediately in qib_set_mtu().
+ */
+ ppd->ibmaxlen = ppd->init_ibmaxlen;
+ qib_set_mtu(ppd, mtu);
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+ lastfail = dd->f_bringup_serdes(ppd);
+ if (lastfail) {
+ qib_devinfo(dd->pcidev,
+ "Failed to bringup IB port %u\n", ppd->port);
+ lastfail = -ENETDOWN;
+ continue;
+ }
+
+ /* let link come up, and enable IBC */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ portok++;
+ }
+
+ if (!portok) {
+ /* none of the ports initialized */
+ if (!ret && lastfail)
+ ret = lastfail;
+ else if (!ret)
+ ret = -ENETDOWN;
+ /* but continue on, so we can debug cause */
+ }
+
+ enable_chip(dd);
+
+ init_piobuf_state(dd);
+
+done:
+ if (!ret) {
+ /* chip is OK for user apps; mark it as initialized */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ /*
+ * Set status even if port serdes is not initialized
+ * so that diags will work.
+ */
+ *ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
+ QIB_STATUS_INITTED;
+ if (!ppd->link_speed_enabled)
+ continue;
+ if (dd->flags & QIB_HAS_SEND_DMA)
+ ret = qib_setup_sdma(ppd);
+ init_timer(&ppd->hol_timer);
+ ppd->hol_timer.function = qib_hol_event;
+ ppd->hol_timer.data = (unsigned long)ppd;
+ ppd->hol_state = QIB_HOL_UP;
+ }
+
+ /* now we can enable all interrupts from the chip */
+ dd->f_set_intr_state(dd, 1);
+
+ /*
+ * Setup to verify we get an interrupt, and fallback
+ * to an alternate if necessary and possible.
+ */
+ mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
+ /* start stats retrieval timer */
+ mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+ }
+
+ /* if ret is non-zero, we probably should do some cleanup here... */
+ return ret;
+}
+
+/*
+ * These next two routines are placeholders in case we don't have per-arch
+ * code for controlling write combining. If explicit control of write
+ * combining is not available, performance will probably be awful.
+ */
+
+int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
+{
+ return -EOPNOTSUPP;
+}
+
+void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
+{
+}
+
+static inline struct qib_devdata *__qib_lookup(int unit)
+{
+ return idr_find(&qib_unit_table, unit);
+}
+
+struct qib_devdata *qib_lookup(int unit)
+{
+ struct qib_devdata *dd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+ dd = __qib_lookup(unit);
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+ return dd;
+}
+
+/*
+ * Stop the timers during unit shutdown, or after an error late
+ * in initialization.
+ */
+static void qib_stop_timers(struct qib_devdata *dd)
+{
+ struct qib_pportdata *ppd;
+ int pidx;
+
+ if (dd->stats_timer.data) {
+ del_timer_sync(&dd->stats_timer);
+ dd->stats_timer.data = 0;
+ }
+ if (dd->intrchk_timer.data) {
+ del_timer_sync(&dd->intrchk_timer);
+ dd->intrchk_timer.data = 0;
+ }
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->hol_timer.data)
+ del_timer_sync(&ppd->hol_timer);
+ if (ppd->led_override_timer.data) {
+ del_timer_sync(&ppd->led_override_timer);
+ atomic_set(&ppd->led_override_timer_active, 0);
+ }
+ if (ppd->symerr_clear_timer.data)
+ del_timer_sync(&ppd->symerr_clear_timer);
+ }
+}
+
+/**
+ * qib_shutdown_device - shut down a device
+ * @dd: the qlogic_ib device
+ *
+ * This is called to make the device quiet when we are about to
+ * unload the driver, and also when the device is administratively
+ * disabled. It does not free any data structures.
+ * Everything it does has to be set up again by qib_init(dd, 1).
+ */
+static void qib_shutdown_device(struct qib_devdata *dd)
+{
+ struct qib_pportdata *ppd;
+ unsigned pidx;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ spin_lock_irq(&ppd->lflags_lock);
+ ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
+ QIBL_LINKARMED | QIBL_LINKACTIVE |
+ QIBL_LINKV);
+ spin_unlock_irq(&ppd->lflags_lock);
+ *ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
+ }
+ dd->flags &= ~QIB_INITTED;
+
+ /* mask interrupts, but not errors */
+ dd->f_set_intr_state(dd, 0);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
+ QIB_RCVCTRL_CTXT_DIS |
+ QIB_RCVCTRL_INTRAVAIL_DIS |
+ QIB_RCVCTRL_PKEY_ENB, -1);
+ /*
+ * Gracefully stop all sends allowing any in progress to
+ * trickle out first.
+ */
+ dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
+ }
+
+ /*
+ * Enough for anything that's going to trickle out to have actually
+ * done so.
+ */
+ udelay(20);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ dd->f_setextled(ppd, 0); /* make sure LEDs are off */
+
+ if (dd->flags & QIB_HAS_SEND_DMA)
+ qib_teardown_sdma(ppd);
+
+ dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
+ QIB_SENDCTRL_SEND_DIS);
+ /*
+ * Clear SerdesEnable.
+ * We can't count on interrupts since we are stopping.
+ */
+ dd->f_quiet_serdes(ppd);
+ }
+
+ qib_update_eeprom_log(dd);
+}
+
+/**
+ * qib_free_ctxtdata - free a context's allocated data
+ * @dd: the qlogic_ib device
+ * @rcd: the ctxtdata structure
+ *
+ * free up any allocated data for a context
+ * This should not touch anything that would affect a simultaneous
+ * re-allocation of context data, because it is called after qib_mutex
+ * is released (and can be called from reinit as well).
+ * It should never change any chip state, or global driver state.
+ */
+void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
+{
+ if (!rcd)
+ return;
+
+ if (rcd->rcvhdrq) {
+ dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
+ rcd->rcvhdrq, rcd->rcvhdrq_phys);
+ rcd->rcvhdrq = NULL;
+ if (rcd->rcvhdrtail_kvaddr) {
+ dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+ rcd->rcvhdrtail_kvaddr,
+ rcd->rcvhdrqtailaddr_phys);
+ rcd->rcvhdrtail_kvaddr = NULL;
+ }
+ }
+ if (rcd->rcvegrbuf) {
+ unsigned e;
+
+ for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
+ void *base = rcd->rcvegrbuf[e];
+ size_t size = rcd->rcvegrbuf_size;
+
+ dma_free_coherent(&dd->pcidev->dev, size,
+ base, rcd->rcvegrbuf_phys[e]);
+ }
+ kfree(rcd->rcvegrbuf);
+ rcd->rcvegrbuf = NULL;
+ kfree(rcd->rcvegrbuf_phys);
+ rcd->rcvegrbuf_phys = NULL;
+ rcd->rcvegrbuf_chunks = 0;
+ }
+
+ kfree(rcd->tid_pg_list);
+ vfree(rcd->user_event_mask);
+ vfree(rcd->subctxt_uregbase);
+ vfree(rcd->subctxt_rcvegrbuf);
+ vfree(rcd->subctxt_rcvhdr_base);
+ kfree(rcd);
+}
+
+/*
+ * Perform a PIO buffer bandwidth write test, to verify proper system
+ * configuration. Even when all the setup calls work, occasionally
+ * BIOS or other issues can prevent write combining from working, or
+ * can cause other bandwidth problems to the chip.
+ *
+ * This test simply writes the same buffer over and over again, and
+ * measures close to the peak bandwidth to the chip (not testing
+ * data bandwidth to the wire). On chips that use an address-based
+ * trigger to send packets to the wire, this is easy. On chips that
+ * use a count to trigger, we want to make sure that the packet doesn't
+ * go out on the wire, or trigger flow control checks.
+ */
+static void qib_verify_pioperf(struct qib_devdata *dd)
+{
+ u32 pbnum, cnt, lcnt;
+ u32 __iomem *piobuf;
+ u32 *addr;
+ u64 msecs, emsecs;
+
+ piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
+ if (!piobuf) {
+ qib_devinfo(dd->pcidev,
+ "No PIObufs for checking perf, skipping\n");
+ return;
+ }
+
+ /*
+ * Enough to give us a reasonable test, less than piobuf size, and
+ * likely multiple of store buffer length.
+ */
+ cnt = 1024;
+
+ addr = vmalloc(cnt);
+ if (!addr) {
+ qib_devinfo(dd->pcidev,
+ "Couldn't get memory for checking PIO perf,"
+ " skipping\n");
+ goto done;
+ }
+
+ preempt_disable(); /* we want reasonably accurate elapsed time */
+ msecs = 1 + jiffies_to_msecs(jiffies);
+ for (lcnt = 0; lcnt < 10000U; lcnt++) {
+ /* wait until we cross msec boundary */
+ if (jiffies_to_msecs(jiffies) >= msecs)
+ break;
+ udelay(1);
+ }
+
+ dd->f_set_armlaunch(dd, 0);
+
+ /*
+ * length 0, no dwords actually sent
+ */
+ writeq(0, piobuf);
+ qib_flush_wc();
+
+ /*
+ * This is only roughly accurate, since even with preempt we
+ * still take interrupts that could take a while. Running for
+ * >= 5 msec seems to get us "close enough" to accurate values.
+ */
+ msecs = jiffies_to_msecs(jiffies);
+ for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
+ qib_pio_copy(piobuf + 64, addr, cnt >> 2);
+ emsecs = jiffies_to_msecs(jiffies) - msecs;
+ }
+
+ /* 1 GiB/sec, slightly over IB SDR line rate */
+ if (lcnt < (emsecs * 1024U))
+ qib_dev_err(dd,
+ "Performance problem: bandwidth to PIO buffers is "
+ "only %u MiB/sec\n",
+ lcnt / (u32) emsecs);
+
+ preempt_enable();
+
+ vfree(addr);
+
+done:
+ /* disarm piobuf, so it's available again */
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
+ qib_sendbuf_done(dd, pbnum);
+ dd->f_set_armlaunch(dd, 1);
+}
+
+
+void qib_free_devdata(struct qib_devdata *dd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+ idr_remove(&qib_unit_table, dd->unit);
+ list_del(&dd->list);
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+ ib_dealloc_device(&dd->verbs_dev.ibdev);
+}
+
+/*
+ * Allocate our primary per-unit data structure. Must be done via verbs
+ * allocator, because the verbs cleanup process both does cleanup and
+ * free of the data structure.
+ * "extra" is for chip-specific data.
+ *
+ * Use the idr mechanism to get a unit number for this unit.
+ */
+struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
+{
+ unsigned long flags;
+ struct qib_devdata *dd;
+ int ret;
+
+ if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
+ dd = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
+ if (!dd) {
+ dd = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+ ret = idr_get_new(&qib_unit_table, dd, &dd->unit);
+ if (ret >= 0)
+ list_add(&dd->list, &qib_dev_list);
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+ if (ret < 0) {
+ qib_early_err(&pdev->dev,
+ "Could not allocate unit ID: error %d\n", -ret);
+ ib_dealloc_device(&dd->verbs_dev.ibdev);
+ dd = ERR_PTR(ret);
+ goto bail;
+ }
+
+ if (!qib_cpulist_count) {
+ u32 count = num_online_cpus();
+ qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
+ sizeof(long), GFP_KERNEL);
+ if (qib_cpulist)
+ qib_cpulist_count = count;
+ else
+ qib_early_err(&pdev->dev, "Could not alloc cpulist "
+ "info, cpu affinity might be wrong\n");
+ }
+
+bail:
+ return dd;
+}
+
+/*
+ * Called from freeze mode handlers, and from PCI error
+ * reporting code. Should be paranoid about state of
+ * system and data structures.
+ */
+void qib_disable_after_error(struct qib_devdata *dd)
+{
+ if (dd->flags & QIB_INITTED) {
+ u32 pidx;
+
+ dd->flags &= ~QIB_INITTED;
+ if (dd->pport)
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ struct qib_pportdata *ppd;
+
+ ppd = dd->pport + pidx;
+ if (dd->flags & QIB_PRESENT) {
+ qib_set_linkstate(ppd,
+ QIB_IB_LINKDOWN_DISABLE);
+ dd->f_setextled(ppd, 0);
+ }
+ *ppd->statusp &= ~QIB_STATUS_IB_READY;
+ }
+ }
+
+ /*
+ * Mark as having had an error for driver, and also
+ * for /sys and status word mapped to user programs.
+ * This marks unit as not usable, until reset.
+ */
+ if (dd->devstatusp)
+ *dd->devstatusp |= QIB_STATUS_HWERROR;
+}
+
+static void __devexit qib_remove_one(struct pci_dev *);
+static int __devinit qib_init_one(struct pci_dev *,
+ const struct pci_device_id *);
+
+#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
+#define PFX QIB_DRV_NAME ": "
+
+static const struct pci_device_id qib_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, qib_pci_tbl);
+
+struct pci_driver qib_driver = {
+ .name = QIB_DRV_NAME,
+ .probe = qib_init_one,
+ .remove = __devexit_p(qib_remove_one),
+ .id_table = qib_pci_tbl,
+ .err_handler = &qib_pci_err_handler,
+};
+
+/*
+ * Do all the generic driver unit- and chip-independent memory
+ * allocation and initialization.
+ */
+static int __init qlogic_ib_init(void)
+{
+ int ret;
+
+ ret = qib_dev_init();
+ if (ret)
+ goto bail;
+
+ /*
+ * We create our own workqueue mainly because we want to be
+ * able to flush it when devices are being removed. We can't
+ * use schedule_work()/flush_scheduled_work() because both
+ * unregister_netdev() and linkwatch_event take the rtnl lock,
+ * so flush_scheduled_work() can deadlock during device
+ * removal.
+ */
+ qib_wq = create_workqueue("qib");
+ if (!qib_wq) {
+ ret = -ENOMEM;
+ goto bail_dev;
+ }
+
+ qib_cq_wq = create_workqueue("qib_cq");
+ if (!qib_cq_wq) {
+ ret = -ENOMEM;
+ goto bail_wq;
+ }
+
+ /*
+ * These must be called before the driver is registered with
+ * the PCI subsystem.
+ */
+ idr_init(&qib_unit_table);
+ if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
+ printk(KERN_ERR QIB_DRV_NAME ": idr_pre_get() failed\n");
+ ret = -ENOMEM;
+ goto bail_cq_wq;
+ }
+
+ ret = pci_register_driver(&qib_driver);
+ if (ret < 0) {
+ printk(KERN_ERR QIB_DRV_NAME
+ ": Unable to register driver: error %d\n", -ret);
+ goto bail_unit;
+ }
+
+ /* not fatal if it doesn't work */
+ if (qib_init_qibfs())
+ printk(KERN_ERR QIB_DRV_NAME ": Unable to register qibfs\n");
+ goto bail; /* all OK */
+
+bail_unit:
+ idr_destroy(&qib_unit_table);
+bail_cq_wq:
+ destroy_workqueue(qib_cq_wq);
+bail_wq:
+ destroy_workqueue(qib_wq);
+bail_dev:
+ qib_dev_cleanup();
+bail:
+ return ret;
+}
+
+module_init(qlogic_ib_init);
+
+/*
+ * Do the non-unit driver cleanup, memory free, etc. at unload.
+ */
+static void __exit qlogic_ib_cleanup(void)
+{
+ int ret;
+
+ ret = qib_exit_qibfs();
+ if (ret)
+ printk(KERN_ERR QIB_DRV_NAME ": "
+ "Unable to cleanup counter filesystem: "
+ "error %d\n", -ret);
+
+ pci_unregister_driver(&qib_driver);
+
+ destroy_workqueue(qib_wq);
+ destroy_workqueue(qib_cq_wq);
+
+ qib_cpulist_count = 0;
+ kfree(qib_cpulist);
+
+ idr_destroy(&qib_unit_table);
+ qib_dev_cleanup();
+}
+
+module_exit(qlogic_ib_cleanup);
+
+/* this can only be called after a successful initialization */
+static void cleanup_device_data(struct qib_devdata *dd)
+{
+ int ctxt;
+ int pidx;
+ struct qib_ctxtdata **tmp;
+ unsigned long flags;
+
+ /* users can't do anything more with chip */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ if (dd->pport[pidx].statusp)
+ *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;
+
+ if (!qib_wc_pat)
+ qib_disable_wc(dd);
+
+ if (dd->pioavailregs_dma) {
+ dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+ (void *) dd->pioavailregs_dma,
+ dd->pioavailregs_phys);
+ dd->pioavailregs_dma = NULL;
+ }
+
+ if (dd->pageshadow) {
+ struct page **tmpp = dd->pageshadow;
+ dma_addr_t *tmpd = dd->physshadow;
+ int i, cnt = 0;
+
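+ /* unmap and unpin any user pages still programmed into expected TIDs */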
+ for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
+ int ctxt_tidbase = ctxt * dd->rcvtidcnt;
+ int maxtid = ctxt_tidbase + dd->rcvtidcnt;
+
+ for (i = ctxt_tidbase; i < maxtid; i++) {
+ if (!tmpp[i])
+ continue;
+ pci_unmap_page(dd->pcidev, tmpd[i],
+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ qib_release_user_pages(&tmpp[i], 1);
+ tmpp[i] = NULL;
+ cnt++;
+ }
+ }
+
+ tmpp = dd->pageshadow;
+ dd->pageshadow = NULL;
+ vfree(tmpp);
+ }
+
+ /*
+ * Free any resources still in use (usually just kernel contexts)
+ * at unload; we iterate over ctxtcnt, because that's what we allocate.
+ * We acquire lock to be really paranoid that rcd isn't being
+ * accessed from some interrupt-related code (that should not happen,
+ * but best to be sure).
+ */
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ tmp = dd->rcd;
+ dd->rcd = NULL;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
+ struct qib_ctxtdata *rcd = tmp[ctxt];
+
+ tmp[ctxt] = NULL; /* debugging paranoia */
+ qib_free_ctxtdata(dd, rcd);
+ }
+ kfree(tmp);
+ kfree(dd->boardname);
+}
+
+/*
+ * Clean up on unit shutdown, or error during unit load after
+ * successful initialization.
+ */
+static void qib_postinit_cleanup(struct qib_devdata *dd)
+{
+ /*
+ * Clean up chip-specific stuff.
+ * We check for NULL here, because it's outside
+ * the kregbase check, and we need to call it
+ * after the free_irq. Thus it's possible that
+ * the function pointers were never initialized.
+ */
+ if (dd->f_cleanup)
+ dd->f_cleanup(dd);
+
+ qib_pcie_ddcleanup(dd);
+
+ cleanup_device_data(dd);
+
+ qib_free_devdata(dd);
+}
+
+static int __devinit qib_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int ret, j, pidx, initfail;
+ struct qib_devdata *dd = NULL;
+
+ ret = qib_pcie_init(pdev, ent);
+ if (ret)
+ goto bail;
+
+ /*
+ * Do device-specific initialization, function table setup, dd
+ * allocation, etc.
+ */
+ switch (ent->device) {
+ case PCI_DEVICE_ID_QLOGIC_IB_6120:
+#ifdef CONFIG_PCI_MSI
+ dd = qib_init_iba6120_funcs(pdev, ent);
+#else
+ qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot "
+ "work if CONFIG_PCI_MSI is not enabled\n",
+ ent->device);
+#endif
+ break;
+
+ case PCI_DEVICE_ID_QLOGIC_IB_7220:
+ dd = qib_init_iba7220_funcs(pdev, ent);
+ break;
+
+ case PCI_DEVICE_ID_QLOGIC_IB_7322:
+ dd = qib_init_iba7322_funcs(pdev, ent);
+ break;
+
+ default:
+ qib_early_err(&pdev->dev, "Failing on unknown QLogic "
+ "deviceid 0x%x\n", ent->device);
+ ret = -ENODEV;
+ }
+
+ if (IS_ERR(dd))
+ ret = PTR_ERR(dd);
+ if (ret)
+ goto bail; /* error already printed */
+
+ /* do the generic initialization */
+ initfail = qib_init(dd, 0);
+
+ ret = qib_register_ib_device(dd);
+
+ /*
+ * Now ready for use. This should be cleared whenever we
+ * detect a reset, or initiate one. If earlier failure,
+ * we still create devices, so diags, etc. can be used
+ * to determine cause of problem.
+ */
+ if (!qib_mini_init && !initfail && !ret)
+ dd->flags |= QIB_INITTED;
+
+ j = qib_device_create(dd);
+ if (j)
+ qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
+ j = qibfs_add(dd);
+ if (j)
+ qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
+ -j);
+
+ if (qib_mini_init || initfail || ret) {
+ qib_stop_timers(dd);
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ dd->f_quiet_serdes(dd->pport + pidx);
+ if (initfail)
+ ret = initfail;
+ goto bail;
+ }
+
+ if (!qib_wc_pat) {
+ ret = qib_enable_wc(dd);
+ if (ret) {
+ qib_dev_err(dd, "Write combining not enabled "
+ "(err %d): performance may be poor\n",
+ -ret);
+ ret = 0;
+ }
+ }
+
+ qib_verify_pioperf(dd);
+bail:
+ return ret;
+}
+
+static void __devexit qib_remove_one(struct pci_dev *pdev)
+{
+ struct qib_devdata *dd = pci_get_drvdata(pdev);
+ int ret;
+
+ /* unregister from IB core */
+ qib_unregister_ib_device(dd);
+
+ /*
+ * Disable the IB link, disable interrupts on the device,
+ * clear dma engines, etc.
+ */
+ if (!qib_mini_init)
+ qib_shutdown_device(dd);
+
+ qib_stop_timers(dd);
+
+ /* wait until all of our (qsfp) schedule_work() calls complete */
+ flush_scheduled_work();
+
+ ret = qibfs_remove(dd);
+ if (ret)
+ qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
+ -ret);
+
+ qib_device_remove(dd);
+
+ qib_postinit_cleanup(dd);
+}
+
+/**
+ * qib_create_rcvhdrq - create a receive header queue
+ * @dd: the qlogic_ib device
+ * @rcd: the context data
+ *
+ * This must be contiguous memory (from an i/o perspective), and must be
+ * DMA'able (which means for some systems, it will go through an IOMMU,
+ * or be forced into a low address range).
+ */
+int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
+{
+ unsigned amt;
+
+ if (!rcd->rcvhdrq) {
+ dma_addr_t phys_hdrqtail;
+ gfp_t gfp_flags;
+
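+ /* rcvhdrq size in bytes, rounded up to a whole page */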
+ amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
+ sizeof(u32), PAGE_SIZE);
+ gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
+ GFP_USER : GFP_KERNEL;
+ rcd->rcvhdrq = dma_alloc_coherent(
+ &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
+ gfp_flags | __GFP_COMP);
+
+ if (!rcd->rcvhdrq) {
+ qib_dev_err(dd, "attempt to allocate %d bytes "
+ "for ctxt %u rcvhdrq failed\n",
+ amt, rcd->ctxt);
+ goto bail;
+ }
+
+ if (rcd->ctxt >= dd->first_user_ctxt) {
+ rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
+ if (!rcd->user_event_mask)
+ goto bail_free_hdrq;
+ }
+
+ if (!(dd->flags & QIB_NODMA_RTAIL)) {
+ rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
+ &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
+ gfp_flags);
+ if (!rcd->rcvhdrtail_kvaddr)
+ goto bail_free;
+ rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
+ }
+
+ rcd->rcvhdrq_size = amt;
+ }
+
+ /* clear for security and sanity on each use */
+ memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
+ if (rcd->rcvhdrtail_kvaddr)
+ memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
+ return 0;
+
+bail_free:
+ qib_dev_err(dd, "attempt to allocate 1 page for ctxt %u "
+ "rcvhdrqtailaddr failed\n", rcd->ctxt);
+ vfree(rcd->user_event_mask);
+ rcd->user_event_mask = NULL;
+bail_free_hdrq:
+ dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
+ rcd->rcvhdrq_phys);
+ rcd->rcvhdrq = NULL;
+bail:
+ return -ENOMEM;
+}
+
+/**
+ * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts
+ * @rcd: the context we are setting up.
+ *
+ * Allocate the eager TID buffers and program them into the chip.
+ * They are no longer completely contiguous; we do multiple
+ * allocation calls, since asking for too much per call gets the
+ * OOM code involved, with disastrous results on some kernels.
+ */
+int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
+ size_t size;
+ gfp_t gfp_flags;
+
+ /*
+ * GFP_USER, but without GFP_FS, so buffer cache can be
+ * coalesced (we hope); otherwise, even at order 4, heavy
+ * filesystem activity makes these allocations fail. We also
+ * use compound pages (__GFP_COMP).
+ */
+ gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+
+ egrcnt = rcd->rcvegrcnt;
+ egroff = rcd->rcvegr_tid_base;
+ egrsize = dd->rcvegrbufsize;
+
+ chunk = rcd->rcvegrbuf_chunks;
+ egrperchunk = rcd->rcvegrbufs_perchunk;
+ size = rcd->rcvegrbuf_size;
+ if (!rcd->rcvegrbuf) {
+ rcd->rcvegrbuf =
+ kzalloc(chunk * sizeof(rcd->rcvegrbuf[0]),
+ GFP_KERNEL);
+ if (!rcd->rcvegrbuf)
+ goto bail;
+ }
+ if (!rcd->rcvegrbuf_phys) {
+ rcd->rcvegrbuf_phys =
+ kmalloc(chunk * sizeof(rcd->rcvegrbuf_phys[0]),
+ GFP_KERNEL);
+ if (!rcd->rcvegrbuf_phys)
+ goto bail_rcvegrbuf;
+ }
+ for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
+ if (rcd->rcvegrbuf[e])
+ continue;
+ rcd->rcvegrbuf[e] =
+ dma_alloc_coherent(&dd->pcidev->dev, size,
+ &rcd->rcvegrbuf_phys[e],
+ gfp_flags);
+ if (!rcd->rcvegrbuf[e])
+ goto bail_rcvegrbuf_phys;
+ }
+
+ rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];
+
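+ /* program each eager buffer's DMA address into the chip's eager TID array */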
+ for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
+ dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
+ unsigned i;
+
+ for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
+ dd->f_put_tid(dd, e + egroff +
+ (u64 __iomem *)
+ ((char __iomem *)
+ dd->kregbase +
+ dd->rcvegrbase),
+ RCVHQ_RCV_TYPE_EAGER, pa);
+ pa += egrsize;
+ }
+ cond_resched(); /* don't hog the cpu */
+ }
+
+ return 0;
+
+bail_rcvegrbuf_phys:
+ for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
+ dma_free_coherent(&dd->pcidev->dev, size,
+ rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
+ kfree(rcd->rcvegrbuf_phys);
+ rcd->rcvegrbuf_phys = NULL;
+bail_rcvegrbuf:
+ kfree(rcd->rcvegrbuf);
+ rcd->rcvegrbuf = NULL;
+bail:
+ return -ENOMEM;
+}
+
+int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
+{
+ u64 __iomem *qib_kregbase = NULL;
+ void __iomem *qib_piobase = NULL;
+ u64 __iomem *qib_userbase = NULL;
+ u64 qib_kreglen;
+ u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
+ u64 qib_pio4koffset = dd->piobufbase >> 32;
+ u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
+ u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
+ u64 qib_physaddr = dd->physaddr;
+ u64 qib_piolen;
+ u64 qib_userlen = 0;
+
+ /*
+ * Free the old mapping because the kernel will try to reuse the
+ * old mapping and not create a new mapping with the
+ * write combining attribute.
+ */
+ iounmap(dd->kregbase);
+ dd->kregbase = NULL;
+
+ /*
+ * Assumes chip address space looks like:
+ * - kregs + sregs + cregs + uregs (in any order)
+ * - piobufs (2K and 4K bufs in either order)
+ * or:
+ * - kregs + sregs + cregs (in any order)
+ * - piobufs (2K and 4K bufs in either order)
+ * - uregs
+ */
+ if (dd->piobcnt4k == 0) {
+ qib_kreglen = qib_pio2koffset;
+ qib_piolen = qib_pio2klen;
+ } else if (qib_pio2koffset < qib_pio4koffset) {
+ qib_kreglen = qib_pio2koffset;
+ qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
+ } else {
+ qib_kreglen = qib_pio4koffset;
+ qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
+ }
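+ /* include the VL15 send buffers in the write-combining PIO mapping */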
+ qib_piolen += vl15buflen;
+ /* Map just the configured ports (not all hw ports) */
+ if (dd->uregbase > qib_kreglen)
+ qib_userlen = dd->ureg_align * dd->cfgctxts;
+
+ /* Sanity checks passed, now create the new mappings */
+ qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
+ if (!qib_kregbase)
+ goto bail;
+
+ qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
+ if (!qib_piobase)
+ goto bail_kregbase;
+
+ if (qib_userlen) {
+ qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
+ qib_userlen);
+ if (!qib_userbase)
+ goto bail_piobase;
+ }
+
+ dd->kregbase = qib_kregbase;
+ dd->kregend = (u64 __iomem *)
+ ((char __iomem *) qib_kregbase + qib_kreglen);
+ dd->piobase = qib_piobase;
+ dd->pio2kbase = (void __iomem *)
+ (((char __iomem *) dd->piobase) +
+ qib_pio2koffset - qib_kreglen);
+ if (dd->piobcnt4k)
+ dd->pio4kbase = (void __iomem *)
+ (((char __iomem *) dd->piobase) +
+ qib_pio4koffset - qib_kreglen);
+ if (qib_userlen)
+ /* ureg will now be accessed relative to dd->userbase */
+ dd->userbase = qib_userbase;
+ return 0;
+
+bail_piobase:
+ iounmap(qib_piobase);
+bail_kregbase:
+ iounmap(qib_kregbase);
+bail:
+ return -ENOMEM;
+}
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
new file mode 100644
index 000000000000..54a40828a106
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+/**
+ * qib_format_hwmsg - format a single hwerror message
+ * @msg: message buffer
+ * @msgl: length of message buffer
+ * @hwmsg: message to add to message buffer
+ */
+static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
+{
+ strlcat(msg, "[", msgl);
+ strlcat(msg, hwmsg, msgl);
+ strlcat(msg, "]", msgl);
+}
+
+/**
+ * qib_format_hwerrors - format hardware error messages for display
+ * @hwerrs: hardware errors bit vector
+ * @hwerrmsgs: hardware error descriptions
+ * @nhwerrmsgs: number of hwerrmsgs
+ * @msg: message buffer
+ * @msgl: message buffer length
+ */
+void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs,
+ size_t nhwerrmsgs, char *msg, size_t msgl)
+{
+ int i;
+
+ for (i = 0; i < nhwerrmsgs; i++)
+ if (hwerrs & hwerrmsgs[i].mask)
+ qib_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
+}
+
+static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev)
+{
+ struct ib_event event;
+ struct qib_devdata *dd = ppd->dd;
+
+ event.device = &dd->verbs_dev.ibdev;
+ event.element.port_num = ppd->port;
+ event.event = ev;
+ ib_dispatch_event(&event);
+}
+
+void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
+{
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+ u32 lstate;
+ u8 ltstate;
+ enum ib_event_type ev = 0;
+
+ lstate = dd->f_iblink_state(ibcs); /* linkstate */
+ ltstate = dd->f_ibphys_portstate(ibcs);
+
+ /*
+ * If linkstate transitions into INIT from any of the various down
+ * states, or if it transitions from any of the up (INIT or better)
+ * states into any of the down states (except link recovery), then
+ * call the chip-specific code to take appropriate actions.
+ */
+ if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) &&
+ ltstate == IB_PHYSPORTSTATE_LINKUP) {
+ /* transitioned to UP */
+ if (dd->f_ib_updown(ppd, 1, ibcs))
+ goto skip_ibchange; /* chip-code handled */
+ } else if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
+ QIBL_LINKACTIVE | QIBL_IB_FORCE_NOTIFY)) {
+ if (ltstate != IB_PHYSPORTSTATE_LINKUP &&
+ ltstate <= IB_PHYSPORTSTATE_CFG_TRAIN &&
+ dd->f_ib_updown(ppd, 0, ibcs))
+ goto skip_ibchange; /* chip-code handled */
+ qib_set_uevent_bits(ppd, _QIB_EVENT_LINKDOWN_BIT);
+ }
+
+ if (lstate != IB_PORT_DOWN) {
+ /* lstate is INIT, ARMED, or ACTIVE */
+ if (lstate != IB_PORT_ACTIVE) {
+ *ppd->statusp &= ~QIB_STATUS_IB_READY;
+ if (ppd->lflags & QIBL_LINKACTIVE)
+ ev = IB_EVENT_PORT_ERR;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ if (lstate == IB_PORT_ARMED) {
+ ppd->lflags |= QIBL_LINKARMED | QIBL_LINKV;
+ ppd->lflags &= ~(QIBL_LINKINIT |
+ QIBL_LINKDOWN | QIBL_LINKACTIVE);
+ } else {
+ ppd->lflags |= QIBL_LINKINIT | QIBL_LINKV;
+ ppd->lflags &= ~(QIBL_LINKARMED |
+ QIBL_LINKDOWN | QIBL_LINKACTIVE);
+ }
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ /* start a 75msec timer to clear symbol errors */
+ mod_timer(&ppd->symerr_clear_timer,
+ msecs_to_jiffies(75));
+ } else if (ltstate == IB_PHYSPORTSTATE_LINKUP) {
+ /* active, but not active deferred */
+ qib_hol_up(ppd); /* useful only for 6120 now */
+ *ppd->statusp |=
+ QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
+ qib_clear_symerror_on_linkup((unsigned long)ppd);
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
+ ppd->lflags &= ~(QIBL_LINKINIT |
+ QIBL_LINKDOWN | QIBL_LINKARMED);
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ if (dd->flags & QIB_HAS_SEND_DMA)
+ qib_sdma_process_event(ppd,
+ qib_sdma_event_e30_go_running);
+ ev = IB_EVENT_PORT_ACTIVE;
+ dd->f_setextled(ppd, 1);
+ }
+ } else { /* down */
+ if (ppd->lflags & QIBL_LINKACTIVE)
+ ev = IB_EVENT_PORT_ERR;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_LINKDOWN | QIBL_LINKV;
+ ppd->lflags &= ~(QIBL_LINKINIT |
+ QIBL_LINKACTIVE | QIBL_LINKARMED);
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ *ppd->statusp &= ~QIB_STATUS_IB_READY;
+ }
+
+skip_ibchange:
+ ppd->lastibcstat = ibcs;
+ if (ev)
+ signal_ib_event(ppd, ev);
+ return;
+}
+
+void qib_clear_symerror_on_linkup(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
+ if (ppd->lflags & QIBL_LINKACTIVE)
+ return;
+
+ ppd->ibport_data.z_symbol_error_counter =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
+}
+
+/*
+ * Handle receive interrupts for user ctxts; this means a user
+ * process was waiting for a packet to arrive, and didn't want
+ * to poll.
+ */
+void qib_handle_urcv(struct qib_devdata *dd, u64 ctxtr)
+{
+ struct qib_ctxtdata *rcd;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) {
+ if (!(ctxtr & (1ULL << i)))
+ continue;
+ rcd = dd->rcd[i];
+ if (!rcd || !rcd->cnt)
+ continue;
+
+ if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) {
+ wake_up_interruptible(&rcd->wait);
+ dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS,
+ rcd->ctxt);
+ } else if (test_and_clear_bit(QIB_CTXT_WAITING_URG,
+ &rcd->flag)) {
+ rcd->urgent++;
+ wake_up_interruptible(&rcd->wait);
+ }
+ }
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+}
+
+void qib_bad_intrstatus(struct qib_devdata *dd)
+{
+ static int allbits;
+
+ /* separate routine, for better optimization of qib_intr() */
+
+ /*
+ * We print the message and disable interrupts, in hope of
+ * having a better chance of debugging the problem.
+ */
+ qib_dev_err(dd, "Read of chip interrupt status failed, "
+ "disabling interrupts\n");
+ if (allbits++) {
+ /* disable interrupt delivery, something is very wrong */
+ if (allbits == 2)
+ dd->f_set_intr_state(dd, 0);
+ if (allbits == 3) {
+ qib_dev_err(dd, "2nd bad interrupt status, "
+ "unregistering interrupts\n");
+ dd->flags |= QIB_BADINTR;
+ dd->flags &= ~QIB_INITTED;
+ dd->f_free_irq(dd);
+ }
+ }
+}
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
new file mode 100644
index 000000000000..4b80eb153d57
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "qib.h"
+
+/**
+ * qib_alloc_lkey - allocate an lkey
+ * @rkt: lkey table in which to allocate the lkey
+ * @mr: memory region that this lkey protects
+ *
+ * Returns 1 if successful, otherwise returns 0.
+ */
+
+int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
+{
+ unsigned long flags;
+ u32 r;
+ u32 n;
+ int ret;
+
+ spin_lock_irqsave(&rkt->lock, flags);
+
+ /* Find the next available LKEY */
+ r = rkt->next;
+ n = r;
+ for (;;) {
+ if (rkt->table[r] == NULL)
+ break;
+ r = (r + 1) & (rkt->max - 1);
+ if (r == n) {
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ ret = 0;
+ goto bail;
+ }
+ }
+ rkt->next = (r + 1) & (rkt->max - 1);
+ /*
+ * Make sure lkey is never zero which is reserved to indicate an
+ * unrestricted LKEY.
+ */
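+ /*
+ * lkey layout: table index in the high bits, generation counter
+ * in bits 8 and above; the low 8 bits remain zero.
+ */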
+ rkt->gen++;
+ mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
+ ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
+ << 8);
+ if (mr->lkey == 0) {
+ mr->lkey |= 1 << 8;
+ rkt->gen++;
+ }
+ rkt->table[r] = mr;
+ spin_unlock_irqrestore(&rkt->lock, flags);
+
+ ret = 1;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_free_lkey - free an lkey
+ * @rkt: table from which to free the lkey
+ * @lkey: lkey id to free
+ */
+int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr)
+{
+ unsigned long flags;
+ u32 lkey = mr->lkey;
+ u32 r;
+ int ret;
+
+ spin_lock_irqsave(&dev->lk_table.lock, flags);
+ if (lkey == 0) {
+ if (dev->dma_mr && dev->dma_mr == mr) {
+ ret = atomic_read(&dev->dma_mr->refcount);
+ if (!ret)
+ dev->dma_mr = NULL;
+ } else
+ ret = 0;
+ } else {
+ r = lkey >> (32 - ib_qib_lkey_table_size);
+ ret = atomic_read(&dev->lk_table.table[r]->refcount);
+ if (!ret)
+ dev->lk_table.table[r] = NULL;
+ }
+ spin_unlock_irqrestore(&dev->lk_table.lock, flags);
+
+ if (ret)
+ ret = -EBUSY;
+ return ret;
+}
+
+/**
+ * qib_lkey_ok - check IB SGE for validity and initialize
+ * @rkt: table containing lkey to check SGE against
+ * @isge: outgoing internal SGE
+ * @sge: SGE to check
+ * @acc: access flags
+ *
+ * Return 1 if valid and successful, otherwise returns 0.
+ *
+ * Check the IB SGE for validity and initialize our internal version
+ * of it.
+ */
+int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
+ struct qib_sge *isge, struct ib_sge *sge, int acc)
+{
+ struct qib_mregion *mr;
+ unsigned n, m;
+ size_t off;
+ int ret = 0;
+ unsigned long flags;
+
+ /*
+ * We use LKEY == zero for kernel virtual addresses
+ * (see qib_get_dma_mr and qib_dma.c).
+ */
+ spin_lock_irqsave(&rkt->lock, flags);
+ if (sge->lkey == 0) {
+ struct qib_ibdev *dev = to_idev(pd->ibpd.device);
+
+ if (pd->user)
+ goto bail;
+ if (!dev->dma_mr)
+ goto bail;
+ atomic_inc(&dev->dma_mr->refcount);
+ isge->mr = dev->dma_mr;
+ isge->vaddr = (void *) sge->addr;
+ isge->length = sge->length;
+ isge->sge_length = sge->length;
+ isge->m = 0;
+ isge->n = 0;
+ goto ok;
+ }
+ mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))];
+ if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
+ mr->pd != &pd->ibpd))
+ goto bail;
+
+ off = sge->addr - mr->user_base;
+ if (unlikely(sge->addr < mr->user_base ||
+ off + sge->length > mr->length ||
+ (mr->access_flags & acc) != acc))
+ goto bail;
+
+ off += mr->offset;
+ m = 0;
+ n = 0;
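+ /* walk the region's segment list to the segment containing off */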
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ atomic_inc(&mr->refcount);
+ isge->mr = mr;
+ isge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ isge->length = mr->map[m]->segs[n].length - off;
+ isge->sge_length = sge->length;
+ isge->m = m;
+ isge->n = n;
+ok:
+ ret = 1;
+bail:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ return ret;
+}
+
+/**
+ * qib_rkey_ok - check the IB virtual address, length, and RKEY
+ * @qp: queue pair of the request
+ * @sge: SGE to initialize from the memory region
+ * @len: length of data
+ * @vaddr: virtual address to place data
+ * @rkey: rkey to check
+ * @acc: access flags
+ *
+ * Return 1 if successful, otherwise 0.
+ */
+int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
+ u32 len, u64 vaddr, u32 rkey, int acc)
+{
+ struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+ struct qib_mregion *mr;
+ unsigned n, m;
+ size_t off;
+ int ret = 0;
+ unsigned long flags;
+
+ /*
+ * We use RKEY == zero for kernel virtual addresses
+ * (see qib_get_dma_mr and qib_dma.c).
+ */
+ spin_lock_irqsave(&rkt->lock, flags);
+ if (rkey == 0) {
+ struct qib_pd *pd = to_ipd(qp->ibqp.pd);
+ struct qib_ibdev *dev = to_idev(pd->ibpd.device);
+
+ if (pd->user)
+ goto bail;
+ if (!dev->dma_mr)
+ goto bail;
+ atomic_inc(&dev->dma_mr->refcount);
+ sge->mr = dev->dma_mr;
+ sge->vaddr = (void *) vaddr;
+ sge->length = len;
+ sge->sge_length = len;
+ sge->m = 0;
+ sge->n = 0;
+ goto ok;
+ }
+
+ mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
+ if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
+ goto bail;
+
+ off = vaddr - mr->iova;
+ if (unlikely(vaddr < mr->iova || off + len > mr->length ||
+ (mr->access_flags & acc) == 0))
+ goto bail;
+
+ off += mr->offset;
+ m = 0;
+ n = 0;
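+ /* walk the region's segment list to the segment containing off */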
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ atomic_inc(&mr->refcount);
+ sge->mr = mr;
+ sge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ sge->length = mr->map[m]->segs[n].length - off;
+ sge->sge_length = len;
+ sge->m = m;
+ sge->n = n;
+ok:
+ ret = 1;
+bail:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ return ret;
+}
+
+/*
+ * Initialize the memory region specified by the work request.
+ */
+int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
+{
+ struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+ struct qib_pd *pd = to_ipd(qp->ibqp.pd);
+ struct qib_mregion *mr;
+ u32 rkey = wr->wr.fast_reg.rkey;
+ unsigned i, n, m;
+ int ret = -EINVAL;
+ unsigned long flags;
+ u64 *page_list;
+ size_t ps;
+
+ spin_lock_irqsave(&rkt->lock, flags);
+ if (pd->user || rkey == 0)
+ goto bail;
+
+ mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
+ if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
+ goto bail;
+
+ if (wr->wr.fast_reg.page_list_len > mr->max_segs)
+ goto bail;
+
+ ps = 1UL << wr->wr.fast_reg.page_shift;
+ if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len)
+ goto bail;
+
+ mr->user_base = wr->wr.fast_reg.iova_start;
+ mr->iova = wr->wr.fast_reg.iova_start;
+ mr->lkey = rkey;
+ mr->length = wr->wr.fast_reg.length;
+ mr->access_flags = wr->wr.fast_reg.access_flags;
+ page_list = wr->wr.fast_reg.page_list->page_list;
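+ /* copy the WR page list into the region's segment array, one page-sized segment per entry */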
+ m = 0;
+ n = 0;
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+ mr->map[m]->segs[n].vaddr = (void *) page_list[i];
+ mr->map[m]->segs[n].length = ps;
+ if (++n == QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+
+ ret = 0;
+bail:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
new file mode 100644
index 000000000000..94b0d1f3a8f0
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -0,0 +1,2173 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_smi.h>
+
+#include "qib.h"
+#include "qib_mad.h"
+
+static int reply(struct ib_smp *smp)
+{
+ /*
+ * The verbs framework will handle the directed/LID route
+ * packet changes.
+ */
+ smp->method = IB_MGMT_METHOD_GET_RESP;
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ smp->status |= IB_SMP_DIRECTION;
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
+{
+ struct ib_mad_send_buf *send_buf;
+ struct ib_mad_agent *agent;
+ struct ib_smp *smp;
+ int ret;
+ unsigned long flags;
+ unsigned long timeout;
+
+ agent = ibp->send_agent;
+ if (!agent)
+ return;
+
+ /* o14-3.2.1 */
+ if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
+ return;
+
+ /* o14-2 */
+ if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
+ return;
+
+ send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
+ IB_MGMT_MAD_DATA, GFP_ATOMIC);
+ if (IS_ERR(send_buf))
+ return;
+
+ smp = send_buf->mad;
+ smp->base_version = IB_MGMT_BASE_VERSION;
+ smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+ smp->class_version = 1;
+ smp->method = IB_MGMT_METHOD_TRAP;
+ ibp->tid++;
+ smp->tid = cpu_to_be64(ibp->tid);
+ smp->attr_id = IB_SMP_ATTR_NOTICE;
+ /* o14-1: smp->mkey = 0; */
+ memcpy(smp->data, data, len);
+
+ spin_lock_irqsave(&ibp->lock, flags);
+ if (!ibp->sm_ah) {
+ if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
+ struct ib_ah *ah;
+ struct ib_ah_attr attr;
+
+ memset(&attr, 0, sizeof attr);
+ attr.dlid = ibp->sm_lid;
+ attr.port_num = ppd_from_ibp(ibp)->port;
+ ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
+ if (IS_ERR(ah))
+ ret = -EINVAL;
+ else {
+ send_buf->ah = ah;
+ ibp->sm_ah = to_iah(ah);
+ ret = 0;
+ }
+ } else
+ ret = -EINVAL;
+ } else {
+ send_buf->ah = &ibp->sm_ah->ibah;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+ if (!ret)
+ ret = ib_post_send_mad(send_buf, NULL);
+ if (!ret) {
+ /* 4.096 usec. */
+ timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
+ ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
+ } else {
+ ib_free_send_mad(send_buf);
+ ibp->trap_timeout = 0;
+ }
+}
+
+/*
+ * Send a bad [PQ]_Key trap (ch. 14.3.8).
+ */
+void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
+ u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
+{
+ struct ib_mad_notice_attr data;
+
+ if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
+ ibp->pkey_violations++;
+ else
+ ibp->qkey_violations++;
+ ibp->n_pkt_drops++;
+
+ /* Send violation trap */
+ data.generic_type = IB_NOTICE_TYPE_SECURITY;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = trap_num;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof data.details);
+ data.details.ntc_257_258.lid1 = lid1;
+ data.details.ntc_257_258.lid2 = lid2;
+ data.details.ntc_257_258.key = cpu_to_be32(key);
+ data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
+ data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
+
+ qib_send_trap(ibp, &data, sizeof data);
+}
+
+/*
+ * Send a bad M_Key trap (ch. 14.3.9).
+ */
+static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
+{
+ struct ib_mad_notice_attr data;
+
+ /* Send violation trap */
+ data.generic_type = IB_NOTICE_TYPE_SECURITY;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof data.details);
+ data.details.ntc_256.lid = data.issuer_lid;
+ data.details.ntc_256.method = smp->method;
+ data.details.ntc_256.attr_id = smp->attr_id;
+ data.details.ntc_256.attr_mod = smp->attr_mod;
+ data.details.ntc_256.mkey = smp->mkey;
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ u8 hop_cnt;
+
+ data.details.ntc_256.dr_slid = smp->dr_slid;
+ data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
+ hop_cnt = smp->hop_cnt;
+ if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
+ data.details.ntc_256.dr_trunc_hop |=
+ IB_NOTICE_TRAP_DR_TRUNC;
+ hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
+ }
+ data.details.ntc_256.dr_trunc_hop |= hop_cnt;
+ memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
+ hop_cnt);
+ }
+
+ qib_send_trap(ibp, &data, sizeof data);
+}
+
+/*
+ * Send a Port Capability Mask Changed trap (ch. 14.3.11).
+ */
+void qib_cap_mask_chg(struct qib_ibport *ibp)
+{
+ struct ib_mad_notice_attr data;
+
+ data.generic_type = IB_NOTICE_TYPE_INFO;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof data.details);
+ data.details.ntc_144.lid = data.issuer_lid;
+ data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
+
+ qib_send_trap(ibp, &data, sizeof data);
+}
+
+/*
+ * Send a System Image GUID Changed trap (ch. 14.3.12).
+ */
+void qib_sys_guid_chg(struct qib_ibport *ibp)
+{
+ struct ib_mad_notice_attr data;
+
+ data.generic_type = IB_NOTICE_TYPE_INFO;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof data.details);
+ data.details.ntc_145.lid = data.issuer_lid;
+ data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
+
+ qib_send_trap(ibp, &data, sizeof data);
+}
+
+/*
+ * Send a Node Description Changed trap (ch. 14.3.13).
+ */
+void qib_node_desc_chg(struct qib_ibport *ibp)
+{
+ struct ib_mad_notice_attr data;
+
+ data.generic_type = IB_NOTICE_TYPE_INFO;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof data.details);
+ data.details.ntc_144.lid = data.issuer_lid;
+ data.details.ntc_144.local_changes = 1;
+ data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
+
+ qib_send_trap(ibp, &data, sizeof data);
+}
+
+static int subn_get_nodedescription(struct ib_smp *smp,
+ struct ib_device *ibdev)
+{
+ if (smp->attr_mod)
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
+
+ return reply(smp);
+}
+
+static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ u32 vendor, majrev, minrev;
+ unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
+
+ /* GUID 0 is illegal */
+ if (smp->attr_mod || pidx >= dd->num_pports ||
+ dd->pport[pidx].guid == 0)
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else
+ nip->port_guid = dd->pport[pidx].guid;
+
+ nip->base_version = 1;
+ nip->class_version = 1;
+ nip->node_type = 1; /* channel adapter */
+ nip->num_ports = ibdev->phys_port_cnt;
+ /* This is already in network order */
+ nip->sys_guid = ib_qib_sys_image_guid;
+ nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
+ nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
+ nip->device_id = cpu_to_be16(dd->deviceid);
+ majrev = dd->majrev;
+ minrev = dd->minrev;
+ nip->revision = cpu_to_be32((majrev << 16) | minrev);
+ nip->local_port_num = port;
+ vendor = dd->vendorid;
+ nip->vendor_id[0] = QIB_SRC_OUI_1;
+ nip->vendor_id[1] = QIB_SRC_OUI_2;
+ nip->vendor_id[2] = QIB_SRC_OUI_3;
+
+ return reply(smp);
+}
+
+static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
+ __be64 *p = (__be64 *) smp->data;
+ unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
+
+ /* 32 blocks of 8 64-bit GUIDs per block */
+
+ memset(smp->data, 0, sizeof(smp->data));
+
+ if (startgx == 0 && pidx < dd->num_pports) {
+ struct qib_pportdata *ppd = dd->pport + pidx;
+ struct qib_ibport *ibp = &ppd->ibport_data;
+ __be64 g = ppd->guid;
+ unsigned i;
+
+ /* GUID 0 is illegal */
+ if (g == 0)
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else {
+ /* The first is a copy of the read-only HW GUID. */
+ p[0] = g;
+ for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
+ p[i] = ibp->guids[i - 1];
+ }
+ } else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return reply(smp);
+}
+
+static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
+{
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
+}
+
+static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
+{
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
+}
+
+static int get_overrunthreshold(struct qib_pportdata *ppd)
+{
+ return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
+}
+
+/**
+ * set_overrunthreshold - set the overrun threshold
+ * @ppd: the physical port data
+ * @n: the new threshold
+ *
+ * Note that this will only take effect when the link state changes.
+ */
+static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
+{
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
+ (u32)n);
+ return 0;
+}
+
+static int get_phyerrthreshold(struct qib_pportdata *ppd)
+{
+ return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
+}
+
+/**
+ * set_phyerrthreshold - set the physical error threshold
+ * @ppd: the physical port data
+ * @n: the new threshold
+ *
+ * Note that this will only take effect when the link state changes.
+ */
+static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
+{
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
+ (u32)n);
+ return 0;
+}
+
+/**
+ * get_linkdowndefaultstate - get the default linkdown state
+ * @ppd: the physical port data
+ *
+ * Returns zero if the default is POLL, 1 if the default is SLEEP.
+ */
+static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
+{
+ return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
+ IB_LINKINITCMD_SLEEP;
+}
+
+static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
+{
+ int ret = 0;
+
+ /* Is the mkey in the process of expiring? */
+ if (ibp->mkey_lease_timeout &&
+ time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
+ /* Clear timeout and mkey protection field. */
+ ibp->mkey_lease_timeout = 0;
+ ibp->mkeyprot = 0;
+ }
+
+ /* M_Key checking depends on Portinfo:M_Key_protect_bits */
+ if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 &&
+ ibp->mkey != smp->mkey &&
+ (smp->method == IB_MGMT_METHOD_SET ||
+ smp->method == IB_MGMT_METHOD_TRAP_REPRESS ||
+ (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) {
+ if (ibp->mkey_violations != 0xFFFF)
+ ++ibp->mkey_violations;
+ if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
+ ibp->mkey_lease_timeout = jiffies +
+ ibp->mkey_lease_period * HZ;
+ /* Generate a trap notice. */
+ qib_bad_mkey(ibp, smp);
+ ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ } else if (ibp->mkey_lease_timeout)
+ ibp->mkey_lease_timeout = 0;
+
+ return ret;
+}
+
+static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct qib_devdata *dd;
+ struct qib_pportdata *ppd;
+ struct qib_ibport *ibp;
+ struct ib_port_info *pip = (struct ib_port_info *)smp->data;
+ u16 lid;
+ u8 mtu;
+ int ret;
+ u32 state;
+ u32 port_num = be32_to_cpu(smp->attr_mod);
+
+ if (port_num == 0)
+ port_num = port;
+ else {
+ if (port_num > ibdev->phys_port_cnt) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ ret = reply(smp);
+ goto bail;
+ }
+ if (port_num != port) {
+ ibp = to_iport(ibdev, port_num);
+ ret = check_mkey(ibp, smp, 0);
+ if (ret)
+ goto bail;
+ }
+ }
+
+ dd = dd_from_ibdev(ibdev);
+ /* IB numbers ports from 1, hdw from 0 */
+ ppd = dd->pport + (port_num - 1);
+ ibp = &ppd->ibport_data;
+
+ /* Clear all fields. Only set the non-zero fields. */
+ memset(smp->data, 0, sizeof(smp->data));
+
+ /* Only return the mkey if the protection field allows it. */
+ if (smp->method == IB_MGMT_METHOD_SET || ibp->mkey == smp->mkey ||
+ ibp->mkeyprot == 0)
+ pip->mkey = ibp->mkey;
+ pip->gid_prefix = ibp->gid_prefix;
+ lid = ppd->lid;
+ pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
+ pip->sm_lid = cpu_to_be16(ibp->sm_lid);
+ pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
+ /* pip->diag_code; */
+ pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
+ pip->local_port_num = port;
+ pip->link_width_enabled = ppd->link_width_enabled;
+ pip->link_width_supported = ppd->link_width_supported;
+ pip->link_width_active = ppd->link_width_active;
+ state = dd->f_iblink_state(ppd->lastibcstat);
+ pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;
+
+ pip->portphysstate_linkdown =
+ (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
+ (get_linkdowndefaultstate(ppd) ? 1 : 2);
+ pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
+ pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
+ ppd->link_speed_enabled;
+ switch (ppd->ibmtu) {
+ default: /* something is wrong; fall through */
+ case 4096:
+ mtu = IB_MTU_4096;
+ break;
+ case 2048:
+ mtu = IB_MTU_2048;
+ break;
+ case 1024:
+ mtu = IB_MTU_1024;
+ break;
+ case 512:
+ mtu = IB_MTU_512;
+ break;
+ case 256:
+ mtu = IB_MTU_256;
+ break;
+ }
+ pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
+ pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */
+ pip->vl_high_limit = ibp->vl_high_limit;
+ pip->vl_arb_high_cap =
+ dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
+ pip->vl_arb_low_cap =
+ dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
+ /* InitTypeReply = 0 */
+ pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
+ /* HCAs ignore VLStallCount and HOQLife */
+ /* pip->vlstallcnt_hoqlife; */
+ pip->operationalvl_pei_peo_fpi_fpo =
+ dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
+ pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
+ /* P_KeyViolations are counted by hardware. */
+ pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
+ pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
+ /* Only the hardware GUID is supported for now */
+ pip->guid_cap = QIB_GUIDS_PER_PORT;
+ pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
+ /* 32.768 usec. response time (guessing) */
+ pip->resv_resptimevalue = 3;
+ pip->localphyerrors_overrunerrors =
+ (get_phyerrthreshold(ppd) << 4) |
+ get_overrunthreshold(ppd);
+ /* pip->max_credit_hint; */
+ if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
+ u32 v;
+
+ v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
+ pip->link_roundtrip_latency[0] = v >> 16;
+ pip->link_roundtrip_latency[1] = v >> 8;
+ pip->link_roundtrip_latency[2] = v;
+ }
+
+ ret = reply(smp);
+
+bail:
+ return ret;
+}
+
+/**
+ * get_pkeys - return the PKEY table
+ * @dd: the qlogic_ib device
+ * @port: the IB port number
+ * @pkeys: the pkey table is placed here
+ */
+static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
+{
+ struct qib_pportdata *ppd = dd->pport + port - 1;
+ /*
+ * always a kernel context, no locking needed.
+ * If we get here with ppd setup, no need to check
+ * that rcd is valid.
+ */
+ struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];
+
+ memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));
+
+ return 0;
+}
+
+static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
+ u16 *p = (u16 *) smp->data;
+ __be16 *q = (__be16 *) smp->data;
+
+ /* 64 blocks of 32 16-bit P_Key entries */
+
+ memset(smp->data, 0, sizeof(smp->data));
+ if (startpx == 0) {
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ unsigned i, n = qib_get_npkeys(dd);
+
+ get_pkeys(dd, port, p);
+
+ for (i = 0; i < n; i++)
+ q[i] = cpu_to_be16(p[i]);
+ } else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return reply(smp);
+}
+
+static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
+ __be64 *p = (__be64 *) smp->data;
+ unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
+
+ /* 32 blocks of 8 64-bit GUIDs per block */
+
+ if (startgx == 0 && pidx < dd->num_pports) {
+ struct qib_pportdata *ppd = dd->pport + pidx;
+ struct qib_ibport *ibp = &ppd->ibport_data;
+ unsigned i;
+
+ /* The first entry is read-only. */
+ for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
+ ibp->guids[i - 1] = p[i];
+ } else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ /* The only GUID we support is the first read-only entry. */
+ return subn_get_guidinfo(smp, ibdev, port);
+}
+
+/**
+ * subn_set_portinfo - set port information
+ * @smp: the incoming SM packet
+ * @ibdev: the infiniband device
+ * @port: the port on the device
+ *
+ * Set Portinfo (see ch. 14.2.5.6).
+ */
+static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct ib_port_info *pip = (struct ib_port_info *)smp->data;
+ struct ib_event event;
+ struct qib_devdata *dd;
+ struct qib_pportdata *ppd;
+ struct qib_ibport *ibp;
+ char clientrereg = 0;
+ unsigned long flags;
+ u16 lid, smlid;
+ u8 lwe;
+ u8 lse;
+ u8 state;
+ u8 vls;
+ u8 msl;
+ u16 lstate;
+ int ret, ore, mtu;
+ u32 port_num = be32_to_cpu(smp->attr_mod);
+
+ if (port_num == 0)
+ port_num = port;
+ else {
+ if (port_num > ibdev->phys_port_cnt)
+ goto err;
+ /* Port attributes can only be set on the receiving port */
+ if (port_num != port)
+ goto get_only;
+ }
+
+ dd = dd_from_ibdev(ibdev);
+ /* IB numbers ports from 1, hdw from 0 */
+ ppd = dd->pport + (port_num - 1);
+ ibp = &ppd->ibport_data;
+ event.device = ibdev;
+ event.element.port_num = port;
+
+ ibp->mkey = pip->mkey;
+ ibp->gid_prefix = pip->gid_prefix;
+ ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
+
+ lid = be16_to_cpu(pip->lid);
+ /* Must be a valid unicast LID address. */
+ if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
+ goto err;
+ if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
+ if (ppd->lid != lid)
+ qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
+ if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
+ qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
+ qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
+ event.event = IB_EVENT_LID_CHANGE;
+ ib_dispatch_event(&event);
+ }
+
+ smlid = be16_to_cpu(pip->sm_lid);
+ msl = pip->neighbormtu_mastersmsl & 0xF;
+ /* Must be a valid unicast LID address. */
+ if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
+ goto err;
+ if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
+ spin_lock_irqsave(&ibp->lock, flags);
+ if (ibp->sm_ah) {
+ if (smlid != ibp->sm_lid)
+ ibp->sm_ah->attr.dlid = smlid;
+ if (msl != ibp->sm_sl)
+ ibp->sm_ah->attr.sl = msl;
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+ if (smlid != ibp->sm_lid)
+ ibp->sm_lid = smlid;
+ if (msl != ibp->sm_sl)
+ ibp->sm_sl = msl;
+ event.event = IB_EVENT_SM_CHANGE;
+ ib_dispatch_event(&event);
+ }
+
+ /* Allow 1x or 4x to be set (see 14.2.6.6). */
+ lwe = pip->link_width_enabled;
+ if (lwe) {
+ if (lwe == 0xFF)
+ lwe = ppd->link_width_supported;
+ else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
+ goto err;
+ set_link_width_enabled(ppd, lwe);
+ }
+
+ lse = pip->linkspeedactive_enabled & 0xF;
+ if (lse) {
+ /*
+ * The IB 1.2 spec. only allows link speed values
+ * 1, 3, 5, 7, 15. 1.2.1 extended to allow specific
+ * speeds.
+ */
+ if (lse == 15)
+ lse = ppd->link_speed_supported;
+ else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
+ goto err;
+ set_link_speed_enabled(ppd, lse);
+ }
+
+ /* Set link down default state. */
+ switch (pip->portphysstate_linkdown & 0xF) {
+ case 0: /* NOP */
+ break;
+ case 1: /* SLEEP */
+ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
+ IB_LINKINITCMD_SLEEP);
+ break;
+ case 2: /* POLL */
+ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
+ IB_LINKINITCMD_POLL);
+ break;
+ default:
+ goto err;
+ }
+
+ ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
+ ibp->vl_high_limit = pip->vl_high_limit;
+ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
+ ibp->vl_high_limit);
+
+ mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
+ if (mtu == -1)
+ goto err;
+ qib_set_mtu(ppd, mtu);
+
+ /* Set operational VLs */
+ vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
+ if (vls) {
+ if (vls > ppd->vls_supported)
+ goto err;
+ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
+ }
+
+ if (pip->mkey_violations == 0)
+ ibp->mkey_violations = 0;
+
+ if (pip->pkey_violations == 0)
+ ibp->pkey_violations = 0;
+
+ if (pip->qkey_violations == 0)
+ ibp->qkey_violations = 0;
+
+ ore = pip->localphyerrors_overrunerrors;
+ if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
+ goto err;
+
+ if (set_overrunthreshold(ppd, (ore & 0xF)))
+ goto err;
+
+ ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
+
+ if (pip->clientrereg_resv_subnetto & 0x80) {
+ clientrereg = 1;
+ event.event = IB_EVENT_CLIENT_REREGISTER;
+ ib_dispatch_event(&event);
+ }
+
+ /*
+ * Do the port state change now that the other link parameters
+ * have been set.
+ * Changing the port physical state only makes sense if the link
+ * is down or is being set to down.
+ */
+ state = pip->linkspeed_portstate & 0xF;
+ lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
+ if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
+ goto err;
+
+ /*
+ * Only state changes of DOWN, ARM, and ACTIVE are valid
+ * and must be in the correct state to take effect (see 7.2.6).
+ */
+ switch (state) {
+ case IB_PORT_NOP:
+ if (lstate == 0)
+ break;
+ /* FALLTHROUGH */
+ case IB_PORT_DOWN:
+ if (lstate == 0)
+ lstate = QIB_IB_LINKDOWN_ONLY;
+ else if (lstate == 1)
+ lstate = QIB_IB_LINKDOWN_SLEEP;
+ else if (lstate == 2)
+ lstate = QIB_IB_LINKDOWN;
+ else if (lstate == 3)
+ lstate = QIB_IB_LINKDOWN_DISABLE;
+ else
+ goto err;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_LINKV;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ qib_set_linkstate(ppd, lstate);
+ /*
+ * Don't send a reply if the response would be sent
+ * through the disabled port.
+ */
+ if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
+ ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ goto done;
+ }
+ qib_wait_linkstate(ppd, QIBL_LINKV, 10);
+ break;
+ case IB_PORT_ARMED:
+ qib_set_linkstate(ppd, QIB_IB_LINKARM);
+ break;
+ case IB_PORT_ACTIVE:
+ qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
+ break;
+ default:
+ /* XXX We have already partially updated our state! */
+ goto err;
+ }
+
+ ret = subn_get_portinfo(smp, ibdev, port);
+
+ if (clientrereg)
+ pip->clientrereg_resv_subnetto |= 0x80;
+
+ goto done;
+
+err:
+ smp->status |= IB_SMP_INVALID_FIELD;
+get_only:
+ ret = subn_get_portinfo(smp, ibdev, port);
+done:
+ return ret;
+}
+
+/**
+ * rm_pkey - decrement the reference count for the given PKEY
+ * @ppd: the qlogic_ib port data
+ * @key: the PKEY value
+ *
+ * Return true if this was the last reference and the hardware table entry
+ * needs to be changed.
+ */
+static int rm_pkey(struct qib_pportdata *ppd, u16 key)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (ppd->pkeys[i] != key)
+ continue;
+ if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
+ ppd->pkeys[i] = 0;
+ ret = 1;
+ goto bail;
+ }
+ break;
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * add_pkey - add the given PKEY to the hardware table
+ * @ppd: the qlogic_ib port data
+ * @key: the PKEY
+ *
+ * Return an error code if unable to add the entry, zero if no change,
+ * or 1 if the hardware PKEY register needs to be updated.
+ */
+static int add_pkey(struct qib_pportdata *ppd, u16 key)
+{
+ int i;
+ u16 lkey = key & 0x7FFF;
+ int any = 0;
+ int ret;
+
+ if (lkey == 0x7FFF) {
+ ret = 0;
+ goto bail;
+ }
+
+ /* Look for an empty slot or a matching PKEY. */
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (!ppd->pkeys[i]) {
+ any++;
+ continue;
+ }
+ /* If it matches exactly, try to increment the ref count */
+ if (ppd->pkeys[i] == key) {
+ if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
+ ret = 0;
+ goto bail;
+ }
+ /* Lost the race. Look for an empty slot below. */
+ atomic_dec(&ppd->pkeyrefs[i]);
+ any++;
+ }
+ /*
+ * It makes no sense to have both the limited and unlimited
+ * PKEY set at the same time since the unlimited one will
+ * disable the limited one.
+ */
+ if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
+ ret = -EEXIST;
+ goto bail;
+ }
+ }
+ if (!any) {
+ ret = -EBUSY;
+ goto bail;
+ }
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (!ppd->pkeys[i] &&
+ atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
+ /* for qibstats, etc. */
+ ppd->pkeys[i] = key;
+ ret = 1;
+ goto bail;
+ }
+ }
+ ret = -EBUSY;
+
+bail:
+ return ret;
+}
+
+/**
+ * set_pkeys - set the PKEY table for ctxt 0
+ * @dd: the qlogic_ib device
+ * @port: the IB port number
+ * @pkeys: the PKEY table
+ */
+static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
+{
+ struct qib_pportdata *ppd;
+ struct qib_ctxtdata *rcd;
+ int i;
+ int changed = 0;
+
+ /*
+ * IB ports one/two always map to contexts zero/one,
+ * always a kernel context, so no locking is needed.
+ * If we get here with ppd setup, no need to check
+ * that rcd is valid.
+ */
+ ppd = dd->pport + (port - 1);
+ rcd = dd->rcd[ppd->hw_pidx];
+
+ for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
+ u16 key = pkeys[i];
+ u16 okey = rcd->pkeys[i];
+
+ if (key == okey)
+ continue;
+ /*
+ * The value of this PKEY table entry is changing.
+ * Remove the old entry in the hardware's array of PKEYs.
+ */
+ if (okey & 0x7FFF)
+ changed |= rm_pkey(ppd, okey);
+ if (key & 0x7FFF) {
+ int ret = add_pkey(ppd, key);
+
+ if (ret < 0)
+ key = 0;
+ else
+ changed |= ret;
+ }
+ rcd->pkeys[i] = key;
+ }
+ if (changed) {
+ struct ib_event event;
+
+ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
+
+ event.event = IB_EVENT_PKEY_CHANGE;
+ event.device = &dd->verbs_dev.ibdev;
+ event.element.port_num = 1;
+ ib_dispatch_event(&event);
+ }
+ return 0;
+}
+
+static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
+ __be16 *p = (__be16 *) smp->data;
+ u16 *q = (u16 *) smp->data;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ unsigned i, n = qib_get_npkeys(dd);
+
+ for (i = 0; i < n; i++)
+ q[i] = be16_to_cpu(p[i]);
+
+ if (startpx != 0 || set_pkeys(dd, port, q) != 0)
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return subn_get_pkeytable(smp, ibdev, port);
+}
+
+static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ u8 *p = (u8 *) smp->data;
+ unsigned i;
+
+ memset(smp->data, 0, sizeof(smp->data));
+
+ if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ else
+ for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
+ *p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];
+
+ return reply(smp);
+}
+
+static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ u8 *p = (u8 *) smp->data;
+ unsigned i;
+
+ if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ return reply(smp);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
+ ibp->sl_to_vl[i] = *p >> 4;
+ ibp->sl_to_vl[i + 1] = *p & 0xF;
+ }
+ qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
+ _QIB_EVENT_SL2VL_CHANGE_BIT);
+
+ return subn_get_sl_to_vl(smp, ibdev, port);
+}
+
+static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
+ struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
+
+ memset(smp->data, 0, sizeof(smp->data));
+
+ if (ppd->vls_supported == IB_VL_VL0)
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ else if (which == IB_VLARB_LOWPRI_0_31)
+ (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
+ smp->data);
+ else if (which == IB_VLARB_HIGHPRI_0_31)
+ (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
+ smp->data);
+ else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return reply(smp);
+}
+
+static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
+ struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
+
+ if (ppd->vls_supported == IB_VL_VL0)
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ else if (which == IB_VLARB_LOWPRI_0_31)
+ (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
+ smp->data);
+ else if (which == IB_VLARB_HIGHPRI_0_31)
+ (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
+ smp->data);
+ else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return subn_get_vl_arb(smp, ibdev, port);
+}
+
+static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ /*
+ * For now, we only send the trap once so no need to process this.
+ * o13-6, o13-7,
+ * o14-3.a4 The SMA shall not send any message in response to a valid
+ * SubnTrapRepress() message.
+ */
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+}
+
+static int pma_get_classportinfo(struct ib_perf *pmp,
+ struct ib_device *ibdev)
+{
+ struct ib_pma_classportinfo *p =
+ (struct ib_pma_classportinfo *)pmp->data;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ if (pmp->attr_mod != 0)
+ pmp->status |= IB_SMP_INVALID_FIELD;
+
+ /* Note that AllPortSelect is not valid */
+ p->base_version = 1;
+ p->class_version = 1;
+ p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ /*
+ * Set the most significant bit of CM2 to indicate support for
+ * congestion statistics
+ */
+ p->reserved[0] = dd->psxmitwait_supported << 7;
+ /*
+ * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
+ */
+ p->resp_time_value = 18;
+
+ return reply((struct ib_smp *) pmp);
+}
+
+static int pma_get_portsamplescontrol(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portsamplescontrol *p =
+ (struct ib_pma_portsamplescontrol *)pmp->data;
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ unsigned long flags;
+ u8 port_select = p->port_select;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ p->port_select = port_select;
+ if (pmp->attr_mod != 0 || port_select != port) {
+ pmp->status |= IB_SMP_INVALID_FIELD;
+ goto bail;
+ }
+ spin_lock_irqsave(&ibp->lock, flags);
+ p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
+ p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+ p->counter_width = 4; /* 32 bit counters */
+ p->counter_mask0_9 = COUNTER_MASK0_9;
+ p->sample_start = cpu_to_be32(ibp->pma_sample_start);
+ p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
+ p->tag = cpu_to_be16(ibp->pma_tag);
+ p->counter_select[0] = ibp->pma_counter_select[0];
+ p->counter_select[1] = ibp->pma_counter_select[1];
+ p->counter_select[2] = ibp->pma_counter_select[2];
+ p->counter_select[3] = ibp->pma_counter_select[3];
+ p->counter_select[4] = ibp->pma_counter_select[4];
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+bail:
+ return reply((struct ib_smp *) pmp);
+}
+
+static int pma_set_portsamplescontrol(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portsamplescontrol *p =
+ (struct ib_pma_portsamplescontrol *)pmp->data;
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ unsigned long flags;
+ u8 status, xmit_flags;
+ int ret;
+
+ if (pmp->attr_mod != 0 || p->port_select != port) {
+ pmp->status |= IB_SMP_INVALID_FIELD;
+ ret = reply((struct ib_smp *) pmp);
+ goto bail;
+ }
+
+ spin_lock_irqsave(&ibp->lock, flags);
+
+ /* Port Sampling code owns the PS* HW counters */
+ xmit_flags = ppd->cong_stats.flags;
+ ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
+ status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+ if (status == IB_PMA_SAMPLE_STATUS_DONE ||
+ (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
+ xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
+ ibp->pma_sample_start = be32_to_cpu(p->sample_start);
+ ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
+ ibp->pma_tag = be16_to_cpu(p->tag);
+ ibp->pma_counter_select[0] = p->counter_select[0];
+ ibp->pma_counter_select[1] = p->counter_select[1];
+ ibp->pma_counter_select[2] = p->counter_select[2];
+ ibp->pma_counter_select[3] = p->counter_select[3];
+ ibp->pma_counter_select[4] = p->counter_select[4];
+ dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
+ ibp->pma_sample_start);
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+ ret = pma_get_portsamplescontrol(pmp, ibdev, port);
+
+bail:
+ return ret;
+}
+
+static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
+ __be16 sel)
+{
+ u64 ret;
+
+ switch (sel) {
+ case IB_PMA_PORT_XMIT_DATA:
+ ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
+ break;
+ case IB_PMA_PORT_RCV_DATA:
+ ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
+ break;
+ case IB_PMA_PORT_XMIT_PKTS:
+ ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
+ break;
+ case IB_PMA_PORT_RCV_PKTS:
+ ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
+ break;
+ case IB_PMA_PORT_XMIT_WAIT:
+ ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/* This function assumes that the xmit_wait lock is already held */
+static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
+{
+ u32 delta;
+
+ delta = get_counter(&ppd->ibport_data, ppd,
+ IB_PMA_PORT_XMIT_WAIT);
+ return ppd->cong_stats.counter + delta;
+}
+
+static void cache_hw_sample_counters(struct qib_pportdata *ppd)
+{
+ struct qib_ibport *ibp = &ppd->ibport_data;
+
+ ppd->cong_stats.counter_cache.psxmitdata =
+ get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
+ ppd->cong_stats.counter_cache.psrcvdata =
+ get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
+ ppd->cong_stats.counter_cache.psxmitpkts =
+ get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
+ ppd->cong_stats.counter_cache.psrcvpkts =
+ get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
+ ppd->cong_stats.counter_cache.psxmitwait =
+ get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
+}
+
+static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
+ __be16 sel)
+{
+ u64 ret;
+
+ switch (sel) {
+ case IB_PMA_PORT_XMIT_DATA:
+ ret = ppd->cong_stats.counter_cache.psxmitdata;
+ break;
+ case IB_PMA_PORT_RCV_DATA:
+ ret = ppd->cong_stats.counter_cache.psrcvdata;
+ break;
+ case IB_PMA_PORT_XMIT_PKTS:
+ ret = ppd->cong_stats.counter_cache.psxmitpkts;
+ break;
+ case IB_PMA_PORT_RCV_PKTS:
+ ret = ppd->cong_stats.counter_cache.psrcvpkts;
+ break;
+ case IB_PMA_PORT_XMIT_WAIT:
+ ret = ppd->cong_stats.counter_cache.psxmitwait;
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int pma_get_portsamplesresult(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portsamplesresult *p =
+ (struct ib_pma_portsamplesresult *)pmp->data;
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ unsigned long flags;
+ u8 status;
+ int i;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+ spin_lock_irqsave(&ibp->lock, flags);
+ p->tag = cpu_to_be16(ibp->pma_tag);
+ if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
+ p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
+ else {
+ status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+ p->sample_status = cpu_to_be16(status);
+ if (status == IB_PMA_SAMPLE_STATUS_DONE) {
+ cache_hw_sample_counters(ppd);
+ ppd->cong_stats.counter =
+ xmit_wait_get_value_delta(ppd);
+ dd->f_set_cntr_sample(ppd,
+ QIB_CONG_TIMER_PSINTERVAL, 0);
+ ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
+ p->counter[i] = cpu_to_be32(
+ get_cache_hw_sample_counters(
+ ppd, ibp->pma_counter_select[i]));
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+ return reply((struct ib_smp *) pmp);
+}
+
+static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portsamplesresult_ext *p =
+ (struct ib_pma_portsamplesresult_ext *)pmp->data;
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ unsigned long flags;
+ u8 status;
+ int i;
+
+ /* Port Sampling code owns the PS* HW counters */
+ memset(pmp->data, 0, sizeof(pmp->data));
+ spin_lock_irqsave(&ibp->lock, flags);
+ p->tag = cpu_to_be16(ibp->pma_tag);
+ if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
+ p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
+ else {
+ status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+ p->sample_status = cpu_to_be16(status);
+ /* 64 bits */
+ p->extended_width = cpu_to_be32(0x80000000);
+ if (status == IB_PMA_SAMPLE_STATUS_DONE) {
+ cache_hw_sample_counters(ppd);
+ ppd->cong_stats.counter =
+ xmit_wait_get_value_delta(ppd);
+ dd->f_set_cntr_sample(ppd,
+ QIB_CONG_TIMER_PSINTERVAL, 0);
+ ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
+ p->counter[i] = cpu_to_be64(
+ get_cache_hw_sample_counters(
+ ppd, ibp->pma_counter_select[i]));
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+ return reply((struct ib_smp *) pmp);
+}
+
+static int pma_get_portcounters(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
+ pmp->data;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_verbs_counters cntrs;
+ u8 port_select = p->port_select;
+
+ qib_get_counters(ppd, &cntrs);
+
+ /* Adjust counters for any resets done. */
+ cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
+ cntrs.link_error_recovery_counter -=
+ ibp->z_link_error_recovery_counter;
+ cntrs.link_downed_counter -= ibp->z_link_downed_counter;
+ cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
+ cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
+ cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
+ cntrs.port_xmit_data -= ibp->z_port_xmit_data;
+ cntrs.port_rcv_data -= ibp->z_port_rcv_data;
+ cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
+ cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
+ cntrs.local_link_integrity_errors -=
+ ibp->z_local_link_integrity_errors;
+ cntrs.excessive_buffer_overrun_errors -=
+ ibp->z_excessive_buffer_overrun_errors;
+ cntrs.vl15_dropped -= ibp->z_vl15_dropped;
+ cntrs.vl15_dropped += ibp->n_vl15_dropped;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ p->port_select = port_select;
+ if (pmp->attr_mod != 0 || port_select != port)
+ pmp->status |= IB_SMP_INVALID_FIELD;
+
+ if (cntrs.symbol_error_counter > 0xFFFFUL)
+ p->symbol_error_counter = cpu_to_be16(0xFFFF);
+ else
+ p->symbol_error_counter =
+ cpu_to_be16((u16)cntrs.symbol_error_counter);
+ if (cntrs.link_error_recovery_counter > 0xFFUL)
+ p->link_error_recovery_counter = 0xFF;
+ else
+ p->link_error_recovery_counter =
+ (u8)cntrs.link_error_recovery_counter;
+ if (cntrs.link_downed_counter > 0xFFUL)
+ p->link_downed_counter = 0xFF;
+ else
+ p->link_downed_counter = (u8)cntrs.link_downed_counter;
+ if (cntrs.port_rcv_errors > 0xFFFFUL)
+ p->port_rcv_errors = cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_errors =
+ cpu_to_be16((u16) cntrs.port_rcv_errors);
+ if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
+ p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_remphys_errors =
+ cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
+ if (cntrs.port_xmit_discards > 0xFFFFUL)
+ p->port_xmit_discards = cpu_to_be16(0xFFFF);
+ else
+ p->port_xmit_discards =
+ cpu_to_be16((u16)cntrs.port_xmit_discards);
+ if (cntrs.local_link_integrity_errors > 0xFUL)
+ cntrs.local_link_integrity_errors = 0xFUL;
+ if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
+ cntrs.excessive_buffer_overrun_errors = 0xFUL;
+ p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+ cntrs.excessive_buffer_overrun_errors;
+ if (cntrs.vl15_dropped > 0xFFFFUL)
+ p->vl15_dropped = cpu_to_be16(0xFFFF);
+ else
+ p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
+ if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
+ p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
+ else
+ p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
+ if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
+ p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
+ else
+ p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
+ if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
+ p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
+ else
+ p->port_xmit_packets =
+ cpu_to_be32((u32)cntrs.port_xmit_packets);
+ if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
+ p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
+ else
+ p->port_rcv_packets =
+ cpu_to_be32((u32) cntrs.port_rcv_packets);
+
+ return reply((struct ib_smp *) pmp);
+}
+
+static int pma_get_portcounters_cong(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ /* Congestion PMA packets start at offset 24 not 64 */
+ struct ib_pma_portcounters_cong *p =
+ (struct ib_pma_portcounters_cong *)pmp->reserved;
+ struct qib_verbs_counters cntrs;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_devdata *dd = dd_from_ppd(ppd);
+ u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF;
+ u64 xmit_wait_counter;
+ unsigned long flags;
+
+ /*
+ * This check is performed only in the GET method because the
+ * SET method ends up calling this anyway.
+ */
+ if (!dd->psxmitwait_supported)
+ pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ if (port_select != port)
+ pmp->status |= IB_SMP_INVALID_FIELD;
+
+ qib_get_counters(ppd, &cntrs);
+ spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+ xmit_wait_counter = xmit_wait_get_value_delta(ppd);
+ spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+
+ /* Adjust counters for any resets done. */
+ cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
+ cntrs.link_error_recovery_counter -=
+ ibp->z_link_error_recovery_counter;
+ cntrs.link_downed_counter -= ibp->z_link_downed_counter;
+ cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
+ cntrs.port_rcv_remphys_errors -=
+ ibp->z_port_rcv_remphys_errors;
+ cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
+ cntrs.local_link_integrity_errors -=
+ ibp->z_local_link_integrity_errors;
+ cntrs.excessive_buffer_overrun_errors -=
+ ibp->z_excessive_buffer_overrun_errors;
+ cntrs.vl15_dropped -= ibp->z_vl15_dropped;
+ cntrs.vl15_dropped += ibp->n_vl15_dropped;
+ cntrs.port_xmit_data -= ibp->z_port_xmit_data;
+ cntrs.port_rcv_data -= ibp->z_port_rcv_data;
+ cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
+ cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
+
+ memset(pmp->reserved, 0, sizeof(pmp->reserved) +
+ sizeof(pmp->data));
+
+ /*
+ * Set top 3 bits to indicate interval in picoseconds in
+ * remaining bits.
+ */
+ p->port_check_rate =
+ cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
+ (dd->psxmitwait_check_rate &
+ ~(QIB_XMIT_RATE_PICO << 13)));
+ p->port_adr_events = cpu_to_be64(0);
+ p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
+ p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
+ p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
+ p->port_xmit_packets =
+ cpu_to_be64(cntrs.port_xmit_packets);
+ p->port_rcv_packets =
+ cpu_to_be64(cntrs.port_rcv_packets);
+ if (cntrs.symbol_error_counter > 0xFFFFUL)
+ p->symbol_error_counter = cpu_to_be16(0xFFFF);
+ else
+ p->symbol_error_counter =
+ cpu_to_be16(
+ (u16)cntrs.symbol_error_counter);
+ if (cntrs.link_error_recovery_counter > 0xFFUL)
+ p->link_error_recovery_counter = 0xFF;
+ else
+ p->link_error_recovery_counter =
+ (u8)cntrs.link_error_recovery_counter;
+ if (cntrs.link_downed_counter > 0xFFUL)
+ p->link_downed_counter = 0xFF;
+ else
+ p->link_downed_counter =
+ (u8)cntrs.link_downed_counter;
+ if (cntrs.port_rcv_errors > 0xFFFFUL)
+ p->port_rcv_errors = cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_errors =
+ cpu_to_be16((u16) cntrs.port_rcv_errors);
+ if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
+ p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_remphys_errors =
+ cpu_to_be16(
+ (u16)cntrs.port_rcv_remphys_errors);
+ if (cntrs.port_xmit_discards > 0xFFFFUL)
+ p->port_xmit_discards = cpu_to_be16(0xFFFF);
+ else
+ p->port_xmit_discards =
+ cpu_to_be16((u16)cntrs.port_xmit_discards);
+ if (cntrs.local_link_integrity_errors > 0xFUL)
+ cntrs.local_link_integrity_errors = 0xFUL;
+ if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
+ cntrs.excessive_buffer_overrun_errors = 0xFUL;
+ p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+ cntrs.excessive_buffer_overrun_errors;
+ if (cntrs.vl15_dropped > 0xFFFFUL)
+ p->vl15_dropped = cpu_to_be16(0xFFFF);
+ else
+ p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
+
+ return reply((struct ib_smp *)pmp);
+}
+
+static int pma_get_portcounters_ext(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters_ext *p =
+ (struct ib_pma_portcounters_ext *)pmp->data;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ u64 swords, rwords, spkts, rpkts, xwait;
+ u8 port_select = p->port_select;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ p->port_select = port_select;
+ if (pmp->attr_mod != 0 || port_select != port) {
+ pmp->status |= IB_SMP_INVALID_FIELD;
+ goto bail;
+ }
+
+ qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
+
+ /* Adjust counters for any resets done. */
+ swords -= ibp->z_port_xmit_data;
+ rwords -= ibp->z_port_rcv_data;
+ spkts -= ibp->z_port_xmit_packets;
+ rpkts -= ibp->z_port_rcv_packets;
+
+ p->port_xmit_data = cpu_to_be64(swords);
+ p->port_rcv_data = cpu_to_be64(rwords);
+ p->port_xmit_packets = cpu_to_be64(spkts);
+ p->port_rcv_packets = cpu_to_be64(rpkts);
+ p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit);
+ p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv);
+ p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit);
+ p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv);
+
+bail:
+ return reply((struct ib_smp *) pmp);
+}
+
+static int pma_set_portcounters(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
+ pmp->data;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_verbs_counters cntrs;
+
+ /*
+ * Since the HW doesn't support clearing counters, we save the
+ * current count and subtract it from future responses.
+ */
+ qib_get_counters(ppd, &cntrs);
+
+ if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
+ ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
+
+ if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
+ ibp->z_link_error_recovery_counter =
+ cntrs.link_error_recovery_counter;
+
+ if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
+ ibp->z_link_downed_counter = cntrs.link_downed_counter;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
+ ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
+ ibp->z_port_rcv_remphys_errors =
+ cntrs.port_rcv_remphys_errors;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
+ ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
+
+ if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
+ ibp->z_local_link_integrity_errors =
+ cntrs.local_link_integrity_errors;
+
+ if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
+ ibp->z_excessive_buffer_overrun_errors =
+ cntrs.excessive_buffer_overrun_errors;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
+ ibp->n_vl15_dropped = 0;
+ ibp->z_vl15_dropped = cntrs.vl15_dropped;
+ }
+
+ if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
+ ibp->z_port_xmit_data = cntrs.port_xmit_data;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
+ ibp->z_port_rcv_data = cntrs.port_rcv_data;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
+ ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
+ ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
+
+ return pma_get_portcounters(pmp, ibdev, port);
+}
+
+static int pma_set_portcounters_cong(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_devdata *dd = dd_from_ppd(ppd);
+ struct qib_verbs_counters cntrs;
+ u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF;
+ int ret = 0;
+ unsigned long flags;
+
+ qib_get_counters(ppd, &cntrs);
+ /* Get counter values before we save them */
+ ret = pma_get_portcounters_cong(pmp, ibdev, port);
+
+ if (counter_select & IB_PMA_SEL_CONG_XMIT) {
+ spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+ ppd->cong_stats.counter = 0;
+ dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
+ 0x0);
+ spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+ }
+ if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
+ ibp->z_port_xmit_data = cntrs.port_xmit_data;
+ ibp->z_port_rcv_data = cntrs.port_rcv_data;
+ ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
+ ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
+ }
+ if (counter_select & IB_PMA_SEL_CONG_ALL) {
+ ibp->z_symbol_error_counter =
+ cntrs.symbol_error_counter;
+ ibp->z_link_error_recovery_counter =
+ cntrs.link_error_recovery_counter;
+ ibp->z_link_downed_counter =
+ cntrs.link_downed_counter;
+ ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
+ ibp->z_port_rcv_remphys_errors =
+ cntrs.port_rcv_remphys_errors;
+ ibp->z_port_xmit_discards =
+ cntrs.port_xmit_discards;
+ ibp->z_local_link_integrity_errors =
+ cntrs.local_link_integrity_errors;
+ ibp->z_excessive_buffer_overrun_errors =
+ cntrs.excessive_buffer_overrun_errors;
+ ibp->n_vl15_dropped = 0;
+ ibp->z_vl15_dropped = cntrs.vl15_dropped;
+ }
+
+ return ret;
+}
+
+static int pma_set_portcounters_ext(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
+ pmp->data;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ u64 swords, rwords, spkts, rpkts, xwait;
+
+ qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
+
+ if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
+ ibp->z_port_xmit_data = swords;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
+ ibp->z_port_rcv_data = rwords;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
+ ibp->z_port_xmit_packets = spkts;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
+ ibp->z_port_rcv_packets = rpkts;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
+ ibp->n_unicast_xmit = 0;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
+ ibp->n_unicast_rcv = 0;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
+ ibp->n_multicast_xmit = 0;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
+ ibp->n_multicast_rcv = 0;
+
+ return pma_get_portcounters_ext(pmp, ibdev, port);
+}
+
+static int process_subn(struct ib_device *ibdev, int mad_flags,
+ u8 port, struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ struct ib_smp *smp = (struct ib_smp *)out_mad;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ int ret;
+
+ *out_mad = *in_mad;
+ if (smp->class_version != 1) {
+ smp->status |= IB_SMP_UNSUP_VERSION;
+ ret = reply(smp);
+ goto bail;
+ }
+
+ ret = check_mkey(ibp, smp, mad_flags);
+ if (ret) {
+ u32 port_num = be32_to_cpu(smp->attr_mod);
+
+ /*
+ * If this is a get/set portinfo, the M_Key of the other
+ * port is normally only checked when the M_Key is OK on
+ * the receiving port. The extra check here is needed
+ * to increment the error counters when the M_Key
+ * fails to match on *both* ports.
+ */
+ if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+ (smp->method == IB_MGMT_METHOD_GET ||
+ smp->method == IB_MGMT_METHOD_SET) &&
+ port_num && port_num <= ibdev->phys_port_cnt &&
+ port != port_num)
+ (void) check_mkey(to_iport(ibdev, port_num), smp, 0);
+ goto bail;
+ }
+
+ switch (smp->method) {
+ case IB_MGMT_METHOD_GET:
+ switch (smp->attr_id) {
+ case IB_SMP_ATTR_NODE_DESC:
+ ret = subn_get_nodedescription(smp, ibdev);
+ goto bail;
+ case IB_SMP_ATTR_NODE_INFO:
+ ret = subn_get_nodeinfo(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_GUID_INFO:
+ ret = subn_get_guidinfo(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_PORT_INFO:
+ ret = subn_get_portinfo(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_PKEY_TABLE:
+ ret = subn_get_pkeytable(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_SL_TO_VL_TABLE:
+ ret = subn_get_sl_to_vl(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_VL_ARB_TABLE:
+ ret = subn_get_vl_arb(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_SM_INFO:
+ if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
+ ret = IB_MAD_RESULT_SUCCESS |
+ IB_MAD_RESULT_CONSUMED;
+ goto bail;
+ }
+ if (ibp->port_cap_flags & IB_PORT_SM) {
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+ }
+ /* FALLTHROUGH */
+ default:
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply(smp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_SET:
+ switch (smp->attr_id) {
+ case IB_SMP_ATTR_GUID_INFO:
+ ret = subn_set_guidinfo(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_PORT_INFO:
+ ret = subn_set_portinfo(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_PKEY_TABLE:
+ ret = subn_set_pkeytable(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_SL_TO_VL_TABLE:
+ ret = subn_set_sl_to_vl(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_VL_ARB_TABLE:
+ ret = subn_set_vl_arb(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_SM_INFO:
+ if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
+ ret = IB_MAD_RESULT_SUCCESS |
+ IB_MAD_RESULT_CONSUMED;
+ goto bail;
+ }
+ if (ibp->port_cap_flags & IB_PORT_SM) {
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+ }
+ /* FALLTHROUGH */
+ default:
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply(smp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_TRAP_REPRESS:
+ if (smp->attr_id == IB_SMP_ATTR_NOTICE)
+ ret = subn_trap_repress(smp, ibdev, port);
+ else {
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply(smp);
+ }
+ goto bail;
+
+ case IB_MGMT_METHOD_TRAP:
+ case IB_MGMT_METHOD_REPORT:
+ case IB_MGMT_METHOD_REPORT_RESP:
+ case IB_MGMT_METHOD_GET_RESP:
+ /*
+ * The ib_mad module will call us to process responses
+ * before checking for other consumers.
+ * Just tell the caller to process it normally.
+ */
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+
+ case IB_MGMT_METHOD_SEND:
+ if (ib_get_smp_direction(smp) &&
+ smp->attr_id == QIB_VENDOR_IPG) {
+ ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
+ smp->data[0]);
+ ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ } else
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+
+ default:
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ ret = reply(smp);
+ }
+
+bail:
+ return ret;
+}
+
+static int process_perf(struct ib_device *ibdev, u8 port,
+ struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ struct ib_perf *pmp = (struct ib_perf *)out_mad;
+ int ret;
+
+ *out_mad = *in_mad;
+ if (pmp->class_version != 1) {
+ pmp->status |= IB_SMP_UNSUP_VERSION;
+ ret = reply((struct ib_smp *) pmp);
+ goto bail;
+ }
+
+ switch (pmp->method) {
+ case IB_MGMT_METHOD_GET:
+ switch (pmp->attr_id) {
+ case IB_PMA_CLASS_PORT_INFO:
+ ret = pma_get_classportinfo(pmp, ibdev);
+ goto bail;
+ case IB_PMA_PORT_SAMPLES_CONTROL:
+ ret = pma_get_portsamplescontrol(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_SAMPLES_RESULT:
+ ret = pma_get_portsamplesresult(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_SAMPLES_RESULT_EXT:
+ ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS:
+ ret = pma_get_portcounters(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS_EXT:
+ ret = pma_get_portcounters_ext(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS_CONG:
+ ret = pma_get_portcounters_cong(pmp, ibdev, port);
+ goto bail;
+ default:
+ pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_smp *) pmp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_SET:
+ switch (pmp->attr_id) {
+ case IB_PMA_PORT_SAMPLES_CONTROL:
+ ret = pma_set_portsamplescontrol(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS:
+ ret = pma_set_portcounters(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS_EXT:
+ ret = pma_set_portcounters_ext(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS_CONG:
+ ret = pma_set_portcounters_cong(pmp, ibdev, port);
+ goto bail;
+ default:
+ pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_smp *) pmp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_TRAP:
+ case IB_MGMT_METHOD_GET_RESP:
+ /*
+ * The ib_mad module will call us to process responses
+ * before checking for other consumers.
+ * Just tell the caller to process it normally.
+ */
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+
+ default:
+ pmp->status |= IB_SMP_UNSUP_METHOD;
+ ret = reply((struct ib_smp *) pmp);
+ }
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_process_mad - process an incoming MAD packet
+ * @ibdev: the infiniband device this packet came in on
+ * @mad_flags: MAD flags
+ * @port: the port number this packet came in on
+ * @in_wc: the work completion entry for this packet
+ * @in_grh: the global route header for this packet
+ * @in_mad: the incoming MAD
+ * @out_mad: any outgoing MAD reply
+ *
+ * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
+ * interested in processing.
+ *
+ * Note that the verbs framework has already done the MAD sanity checks,
+ * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
+ * MADs.
+ *
+ * This is called by the ib_mad module.
+ */
+int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ int ret;
+
+ switch (in_mad->mad_hdr.mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
+ goto bail;
+
+ case IB_MGMT_CLASS_PERF_MGMT:
+ ret = process_perf(ibdev, port, in_mad, out_mad);
+ goto bail;
+
+ default:
+ ret = IB_MAD_RESULT_SUCCESS;
+ }
+
+bail:
+ return ret;
+}
+
+static void send_handler(struct ib_mad_agent *agent,
+ struct ib_mad_send_wc *mad_send_wc)
+{
+ ib_free_send_mad(mad_send_wc->send_buf);
+}
+
+static void xmit_wait_timer_func(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_devdata *dd = dd_from_ppd(ppd);
+ unsigned long flags;
+ u8 status;
+
+ spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+ if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
+ status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+ if (status == IB_PMA_SAMPLE_STATUS_DONE) {
+ /* save counter cache */
+ cache_hw_sample_counters(ppd);
+ ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
+ } else
+ goto done;
+ }
+ ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
+ dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
+done:
+ spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+ mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
+}
+
+int qib_create_agents(struct qib_ibdev *dev)
+{
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct ib_mad_agent *agent;
+ struct qib_ibport *ibp;
+ int p;
+ int ret;
+
+ for (p = 0; p < dd->num_pports; p++) {
+ ibp = &dd->pport[p].ibport_data;
+ agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
+ NULL, 0, send_handler,
+ NULL, NULL);
+ if (IS_ERR(agent)) {
+ ret = PTR_ERR(agent);
+ goto err;
+ }
+
+ /* Initialize xmit_wait structure */
+ dd->pport[p].cong_stats.counter = 0;
+ init_timer(&dd->pport[p].cong_stats.timer);
+ dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
+ dd->pport[p].cong_stats.timer.data =
+ (unsigned long)(&dd->pport[p]);
+ dd->pport[p].cong_stats.timer.expires = 0;
+ add_timer(&dd->pport[p].cong_stats.timer);
+
+ ibp->send_agent = agent;
+ }
+
+ return 0;
+
+err:
+ for (p = 0; p < dd->num_pports; p++) {
+ ibp = &dd->pport[p].ibport_data;
+ if (ibp->send_agent) {
+ agent = ibp->send_agent;
+ ibp->send_agent = NULL;
+ ib_unregister_mad_agent(agent);
+ }
+ }
+
+ return ret;
+}
+
+void qib_free_agents(struct qib_ibdev *dev)
+{
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct ib_mad_agent *agent;
+ struct qib_ibport *ibp;
+ int p;
+
+ for (p = 0; p < dd->num_pports; p++) {
+ ibp = &dd->pport[p].ibport_data;
+ if (ibp->send_agent) {
+ agent = ibp->send_agent;
+ ibp->send_agent = NULL;
+ ib_unregister_mad_agent(agent);
+ }
+ if (ibp->sm_ah) {
+ ib_destroy_ah(&ibp->sm_ah->ibah);
+ ibp->sm_ah = NULL;
+ }
+ if (dd->pport[p].cong_stats.timer.data)
+ del_timer_sync(&dd->pport[p].cong_stats.timer);
+ }
+}
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
new file mode 100644
index 000000000000..147aff9117d7
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mad.h
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
+#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
+#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
+#define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C)
+
+struct ib_node_info {
+ u8 base_version;
+ u8 class_version;
+ u8 node_type;
+ u8 num_ports;
+ __be64 sys_guid;
+ __be64 node_guid;
+ __be64 port_guid;
+ __be16 partition_cap;
+ __be16 device_id;
+ __be32 revision;
+ u8 local_port_num;
+ u8 vendor_id[3];
+} __attribute__ ((packed));
+
+struct ib_mad_notice_attr {
+ u8 generic_type;
+ u8 prod_type_msb;
+ __be16 prod_type_lsb;
+ __be16 trap_num;
+ __be16 issuer_lid;
+ __be16 toggle_count;
+
+ union {
+ struct {
+ u8 details[54];
+ } raw_data;
+
+ struct {
+ __be16 reserved;
+ __be16 lid; /* where violation happened */
+ u8 port_num; /* where violation happened */
+ } __attribute__ ((packed)) ntc_129_131;
+
+ struct {
+ __be16 reserved;
+ __be16 lid; /* LID where change occurred */
+ u8 reserved2;
+ u8 local_changes; /* low bit - local changes */
+ __be32 new_cap_mask; /* new capability mask */
+ u8 reserved3;
+ u8 change_flags; /* low 3 bits only */
+ } __attribute__ ((packed)) ntc_144;
+
+ struct {
+ __be16 reserved;
+ __be16 lid; /* lid where sys guid changed */
+ __be16 reserved2;
+ __be64 new_sys_guid;
+ } __attribute__ ((packed)) ntc_145;
+
+ struct {
+ __be16 reserved;
+ __be16 lid;
+ __be16 dr_slid;
+ u8 method;
+ u8 reserved2;
+ __be16 attr_id;
+ __be32 attr_mod;
+ __be64 mkey;
+ u8 reserved3;
+ u8 dr_trunc_hop;
+ u8 dr_rtn_path[30];
+ } __attribute__ ((packed)) ntc_256;
+
+ struct {
+ __be16 reserved;
+ __be16 lid1;
+ __be16 lid2;
+ __be32 key;
+ __be32 sl_qp1; /* SL: high 4 bits */
+ __be32 qp2; /* high 8 bits reserved */
+ union ib_gid gid1;
+ union ib_gid gid2;
+ } __attribute__ ((packed)) ntc_257_258;
+
+ } details;
+};
+
+/*
+ * Generic trap/notice types
+ */
+#define IB_NOTICE_TYPE_FATAL 0x80
+#define IB_NOTICE_TYPE_URGENT 0x81
+#define IB_NOTICE_TYPE_SECURITY 0x82
+#define IB_NOTICE_TYPE_SM 0x83
+#define IB_NOTICE_TYPE_INFO 0x84
+
+/*
+ * Generic trap/notice producers
+ */
+#define IB_NOTICE_PROD_CA cpu_to_be16(1)
+#define IB_NOTICE_PROD_SWITCH cpu_to_be16(2)
+#define IB_NOTICE_PROD_ROUTER cpu_to_be16(3)
+#define IB_NOTICE_PROD_CLASS_MGR cpu_to_be16(4)
+
+/*
+ * Generic trap/notice numbers
+ */
+#define IB_NOTICE_TRAP_LLI_THRESH cpu_to_be16(129)
+#define IB_NOTICE_TRAP_EBO_THRESH cpu_to_be16(130)
+#define IB_NOTICE_TRAP_FLOW_UPDATE cpu_to_be16(131)
+#define IB_NOTICE_TRAP_CAP_MASK_CHG cpu_to_be16(144)
+#define IB_NOTICE_TRAP_SYS_GUID_CHG cpu_to_be16(145)
+#define IB_NOTICE_TRAP_BAD_MKEY cpu_to_be16(256)
+#define IB_NOTICE_TRAP_BAD_PKEY cpu_to_be16(257)
+#define IB_NOTICE_TRAP_BAD_QKEY cpu_to_be16(258)
+
+/*
+ * Repress trap/notice flags
+ */
+#define IB_NOTICE_REPRESS_LLI_THRESH (1 << 0)
+#define IB_NOTICE_REPRESS_EBO_THRESH (1 << 1)
+#define IB_NOTICE_REPRESS_FLOW_UPDATE (1 << 2)
+#define IB_NOTICE_REPRESS_CAP_MASK_CHG (1 << 3)
+#define IB_NOTICE_REPRESS_SYS_GUID_CHG (1 << 4)
+#define IB_NOTICE_REPRESS_BAD_MKEY (1 << 5)
+#define IB_NOTICE_REPRESS_BAD_PKEY (1 << 6)
+#define IB_NOTICE_REPRESS_BAD_QKEY (1 << 7)
+
+/*
+ * Generic trap/notice other local changes flags (trap 144).
+ */
+#define IB_NOTICE_TRAP_LSE_CHG 0x04 /* Link Speed Enable changed */
+#define IB_NOTICE_TRAP_LWE_CHG 0x02 /* Link Width Enable changed */
+#define IB_NOTICE_TRAP_NODE_DESC_CHG 0x01
+
+/*
+ * Generic trap/notice M_Key violation flags in dr_trunc_hop (trap 256).
+ */
+#define IB_NOTICE_TRAP_DR_NOTICE 0x80
+#define IB_NOTICE_TRAP_DR_TRUNC 0x40
+
+struct ib_vl_weight_elem {
+ u8 vl; /* Only low 4 bits, upper 4 bits reserved */
+ u8 weight;
+};
+
+#define IB_VLARB_LOWPRI_0_31 1
+#define IB_VLARB_LOWPRI_32_63 2
+#define IB_VLARB_HIGHPRI_0_31 3
+#define IB_VLARB_HIGHPRI_32_63 4
+
+/*
+ * PMA class portinfo capability mask bits
+ */
+#define IB_PMA_CLASS_CAP_ALLPORTSELECT cpu_to_be16(1 << 8)
+#define IB_PMA_CLASS_CAP_EXT_WIDTH cpu_to_be16(1 << 9)
+#define IB_PMA_CLASS_CAP_XMIT_WAIT cpu_to_be16(1 << 12)
+
+#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
+#define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010)
+#define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011)
+#define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012)
+#define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D)
+#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E)
+#define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00)
+
+struct ib_perf {
+ u8 base_version;
+ u8 mgmt_class;
+ u8 class_version;
+ u8 method;
+ __be16 status;
+ __be16 unused;
+ __be64 tid;
+ __be16 attr_id;
+ __be16 resv;
+ __be32 attr_mod;
+ u8 reserved[40];
+ u8 data[192];
+} __attribute__ ((packed));
+
+struct ib_pma_classportinfo {
+ u8 base_version;
+ u8 class_version;
+ __be16 cap_mask;
+ u8 reserved[3];
+ u8 resp_time_value; /* only lower 5 bits */
+ union ib_gid redirect_gid;
+ __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */
+ __be16 redirect_lid;
+ __be16 redirect_pkey;
+ __be32 redirect_qp; /* only lower 24 bits */
+ __be32 redirect_qkey;
+ union ib_gid trap_gid;
+ __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */
+ __be16 trap_lid;
+ __be16 trap_pkey;
+ __be32 trap_hl_qp; /* 8, 24 bits respectively */
+ __be32 trap_qkey;
+} __attribute__ ((packed));
+
+struct ib_pma_portsamplescontrol {
+ u8 opcode;
+ u8 port_select;
+ u8 tick;
+ u8 counter_width; /* only lower 3 bits */
+ __be32 counter_mask0_9; /* 2, 10 * 3, bits */
+ __be16 counter_mask10_14; /* 1, 5 * 3, bits */
+ u8 sample_mechanisms;
+ u8 sample_status; /* only lower 2 bits */
+ __be64 option_mask;
+ __be64 vendor_mask;
+ __be32 sample_start;
+ __be32 sample_interval;
+ __be16 tag;
+ __be16 counter_select[15];
+} __attribute__ ((packed));
+
+struct ib_pma_portsamplesresult {
+ __be16 tag;
+ __be16 sample_status; /* only lower 2 bits */
+ __be32 counter[15];
+} __attribute__ ((packed));
+
+struct ib_pma_portsamplesresult_ext {
+ __be16 tag;
+ __be16 sample_status; /* only lower 2 bits */
+ __be32 extended_width; /* only upper 2 bits */
+ __be64 counter[15];
+} __attribute__ ((packed));
+
+struct ib_pma_portcounters {
+ u8 reserved;
+ u8 port_select;
+ __be16 counter_select;
+ __be16 symbol_error_counter;
+ u8 link_error_recovery_counter;
+ u8 link_downed_counter;
+ __be16 port_rcv_errors;
+ __be16 port_rcv_remphys_errors;
+ __be16 port_rcv_switch_relay_errors;
+ __be16 port_xmit_discards;
+ u8 port_xmit_constraint_errors;
+ u8 port_rcv_constraint_errors;
+ u8 reserved1;
+ u8 lli_ebor_errors; /* 4, 4, bits */
+ __be16 reserved2;
+ __be16 vl15_dropped;
+ __be32 port_xmit_data;
+ __be32 port_rcv_data;
+ __be32 port_xmit_packets;
+ __be32 port_rcv_packets;
+} __attribute__ ((packed));
+
+struct ib_pma_portcounters_cong {
+ u8 reserved;
+ u8 reserved1;
+ __be16 port_check_rate;
+ __be16 symbol_error_counter;
+ u8 link_error_recovery_counter;
+ u8 link_downed_counter;
+ __be16 port_rcv_errors;
+ __be16 port_rcv_remphys_errors;
+ __be16 port_rcv_switch_relay_errors;
+ __be16 port_xmit_discards;
+ u8 port_xmit_constraint_errors;
+ u8 port_rcv_constraint_errors;
+ u8 reserved2;
+ u8 lli_ebor_errors; /* 4, 4, bits */
+ __be16 reserved3;
+ __be16 vl15_dropped;
+ __be64 port_xmit_data;
+ __be64 port_rcv_data;
+ __be64 port_xmit_packets;
+ __be64 port_rcv_packets;
+ __be64 port_xmit_wait;
+ __be64 port_adr_events;
+} __attribute__ ((packed));
+
+#define IB_PMA_CONG_HW_CONTROL_TIMER 0x00
+#define IB_PMA_CONG_HW_CONTROL_SAMPLE 0x01
+
+#define QIB_XMIT_RATE_UNSUPPORTED 0x0
+#define QIB_XMIT_RATE_PICO 0x7
+/* number of 4nsec cycles equaling 2secs */
+#define QIB_CONG_TIMER_PSINTERVAL 0x1DCD64EC
+
+#define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001)
+#define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002)
+#define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004)
+#define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008)
+#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010)
+#define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040)
+#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200)
+#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400)
+#define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800)
+#define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000)
+#define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000)
+#define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000)
+#define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000)
+
+#define IB_PMA_SEL_CONG_ALL 0x01
+#define IB_PMA_SEL_CONG_PORT_DATA 0x02
+#define IB_PMA_SEL_CONG_XMIT 0x04
+#define IB_PMA_SEL_CONG_ROUTING 0x08
+
+struct ib_pma_portcounters_ext {
+ u8 reserved;
+ u8 port_select;
+ __be16 counter_select;
+ __be32 reserved1;
+ __be64 port_xmit_data;
+ __be64 port_rcv_data;
+ __be64 port_xmit_packets;
+ __be64 port_rcv_packets;
+ __be64 port_unicast_xmit_packets;
+ __be64 port_unicast_rcv_packets;
+ __be64 port_multicast_xmit_packets;
+ __be64 port_multicast_rcv_packets;
+} __attribute__ ((packed));
+
+#define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001)
+#define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002)
+#define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004)
+#define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008)
+#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010)
+#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020)
+#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040)
+#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080)
+
+/*
+ * The PortSamplesControl.CounterMasks field is an array of 3-bit fields,
+ * each specifying the N'th counter's capabilities. See ch. 16.1.3.2.
+ * We support 5 counters, which only count the mandatory quantities.
+ */
+#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
+#define COUNTER_MASK0_9 \
+ cpu_to_be32(COUNTER_MASK(1, 0) | \
+ COUNTER_MASK(1, 1) | \
+ COUNTER_MASK(1, 2) | \
+ COUNTER_MASK(1, 3) | \
+ COUNTER_MASK(1, 4))
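For illustration only (not part of the patch), a stand-alone sketch of how COUNTER_MASK0_9 expands; the macro is restated so the snippet compiles on its own, and in the driver the result is additionally byte-swapped with cpu_to_be32():

#include <stdio.h>
#include <stdint.h>

/* Stand-alone restatement of the macro above, illustration only. */
#define COUNTER_MASK(q, n) ((uint32_t)(q) << ((9 - (n)) * 3))

int main(void)
{
	/*
	 * Counter n occupies bits (9 - n) * 3 .. (9 - n) * 3 + 2, so marking
	 * counters 0..4 as supported (capability code 1) sets bits 27, 24,
	 * 21, 18 and 15.
	 */
	uint32_t mask = COUNTER_MASK(1, 0) | COUNTER_MASK(1, 1) |
			COUNTER_MASK(1, 2) | COUNTER_MASK(1, 3) |
			COUNTER_MASK(1, 4);

	printf("COUNTER_MASK0_9 (host order) = 0x%08x\n", mask); /* 0x09248000 */
	return 0;
}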
diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c
new file mode 100644
index 000000000000..8b73a11d571c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mmap.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <asm/pgtable.h>
+
+#include "qib_verbs.h"
+
+/**
+ * qib_release_mmap_info - free mmap info structure
+ * @ref: a pointer to the kref within struct qib_mmap_info
+ */
+void qib_release_mmap_info(struct kref *ref)
+{
+ struct qib_mmap_info *ip =
+ container_of(ref, struct qib_mmap_info, ref);
+ struct qib_ibdev *dev = to_idev(ip->context->device);
+
+ spin_lock_irq(&dev->pending_lock);
+ list_del(&ip->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+
+ vfree(ip->obj);
+ kfree(ip);
+}
+
+/*
+ * open and close keep track of how many times the CQ is mapped,
+ * to avoid releasing it.
+ */
+static void qib_vma_open(struct vm_area_struct *vma)
+{
+ struct qib_mmap_info *ip = vma->vm_private_data;
+
+ kref_get(&ip->ref);
+}
+
+static void qib_vma_close(struct vm_area_struct *vma)
+{
+ struct qib_mmap_info *ip = vma->vm_private_data;
+
+ kref_put(&ip->ref, qib_release_mmap_info);
+}
+
+static struct vm_operations_struct qib_vm_ops = {
+ .open = qib_vma_open,
+ .close = qib_vma_close,
+};
+
+/**
+ * qib_mmap - create a new mmap region
+ * @context: the IB user context of the process making the mmap() call
+ * @vma: the VMA to be initialized
+ * Return zero if the mmap is OK. Otherwise, return an errno.
+ */
+int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct qib_ibdev *dev = to_idev(context->device);
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ struct qib_mmap_info *ip, *pp;
+ int ret = -EINVAL;
+
+ /*
+ * Search the device's list of objects waiting for a mmap call.
+ * Normally, this list is very short since a call to create a
+ * CQ, QP, or SRQ is soon followed by a call to mmap().
+ */
+ spin_lock_irq(&dev->pending_lock);
+ list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
+ pending_mmaps) {
+ /* Only the creator is allowed to mmap the object */
+ if (context != ip->context || (__u64) offset != ip->offset)
+ continue;
+ /* Don't allow a mmap larger than the object. */
+ if (size > ip->size)
+ break;
+
+ list_del_init(&ip->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+
+ ret = remap_vmalloc_range(vma, ip->obj, 0);
+ if (ret)
+ goto done;
+ vma->vm_ops = &qib_vm_ops;
+ vma->vm_private_data = ip;
+ qib_vma_open(vma);
+ goto done;
+ }
+ spin_unlock_irq(&dev->pending_lock);
+done:
+ return ret;
+}
+
+/*
+ * Allocate information for qib_mmap
+ */
+struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
+ u32 size,
+ struct ib_ucontext *context,
+ void *obj)
+{
+ struct qib_mmap_info *ip;
+
+ ip = kmalloc(sizeof *ip, GFP_KERNEL);
+ if (!ip)
+ goto bail;
+
+ size = PAGE_ALIGN(size);
+
+ spin_lock_irq(&dev->mmap_offset_lock);
+ if (dev->mmap_offset == 0)
+ dev->mmap_offset = PAGE_SIZE;
+ ip->offset = dev->mmap_offset;
+ dev->mmap_offset += size;
+ spin_unlock_irq(&dev->mmap_offset_lock);
+
+ INIT_LIST_HEAD(&ip->pending_mmaps);
+ ip->size = size;
+ ip->context = context;
+ ip->obj = obj;
+ kref_init(&ip->ref);
+
+bail:
+ return ip;
+}
+
+void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
+ u32 size, void *obj)
+{
+ size = PAGE_ALIGN(size);
+
+ spin_lock_irq(&dev->mmap_offset_lock);
+ if (dev->mmap_offset == 0)
+ dev->mmap_offset = PAGE_SIZE;
+ ip->offset = dev->mmap_offset;
+ dev->mmap_offset += size;
+ spin_unlock_irq(&dev->mmap_offset_lock);
+
+ ip->size = size;
+ ip->obj = obj;
+}
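A hedged sketch of how a caller would typically use these helpers (the function below is hypothetical, not part of the patch): a verbs object such as a CQ wraps its vmalloc()ed shared structure in a qib_mmap_info, queues it on dev->pending_mmaps, and hands ip->offset back to userspace, which then passes that offset to mmap() so qib_mmap() can match and remap the object.

/* Hypothetical caller, illustration only; compiles only in the driver context. */
static int example_export_object(struct qib_ibdev *dev,
				 struct ib_ucontext *context,
				 void *obj, u32 size, __u64 *offset_out)
{
	struct qib_mmap_info *ip;

	ip = qib_create_mmap_info(dev, size, context, obj);
	if (!ip)
		return -ENOMEM;

	/* Make the object findable by a subsequent mmap() call. */
	spin_lock_irq(&dev->pending_lock);
	list_add(&ip->pending_mmaps, &dev->pending_mmaps);
	spin_unlock_irq(&dev->pending_lock);

	*offset_out = ip->offset;	/* handed back to userspace (e.g. via udata) */
	return 0;
}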
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
new file mode 100644
index 000000000000..5f95f0f6385d
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_umem.h>
+#include <rdma/ib_smi.h>
+
+#include "qib.h"
+
+/* Fast memory region */
+struct qib_fmr {
+ struct ib_fmr ibfmr;
+ u8 page_shift;
+ struct qib_mregion mr; /* must be last */
+};
+
+static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
+{
+ return container_of(ibfmr, struct qib_fmr, ibfmr);
+}
+
+/**
+ * qib_get_dma_mr - get a DMA memory region
+ * @pd: protection domain for this memory region
+ * @acc: access flags
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ * Note that all DMA addresses should be created via the
+ * struct ib_dma_mapping_ops functions (see qib_dma.c).
+ */
+struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct qib_ibdev *dev = to_idev(pd->device);
+ struct qib_mr *mr;
+ struct ib_mr *ret;
+ unsigned long flags;
+
+ if (to_ipd(pd)->user) {
+ ret = ERR_PTR(-EPERM);
+ goto bail;
+ }
+
+ mr = kzalloc(sizeof *mr, GFP_KERNEL);
+ if (!mr) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ mr->mr.access_flags = acc;
+ atomic_set(&mr->mr.refcount, 0);
+
+ spin_lock_irqsave(&dev->lk_table.lock, flags);
+ if (!dev->dma_mr)
+ dev->dma_mr = &mr->mr;
+ spin_unlock_irqrestore(&dev->lk_table.lock, flags);
+
+ ret = &mr->ibmr;
+
+bail:
+ return ret;
+}
+
+static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
+{
+ struct qib_mr *mr;
+ int m, i = 0;
+
+ /* Allocate struct plus pointers to first level page tables. */
+ m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
+ mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
+ if (!mr)
+ goto done;
+
+ /* Allocate first level page tables. */
+ for (; i < m; i++) {
+ mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
+ if (!mr->mr.map[i])
+ goto bail;
+ }
+ mr->mr.mapsz = m;
+ mr->mr.max_segs = count;
+
+ /*
+ * ib_reg_phys_mr() will initialize mr->ibmr except for
+ * lkey and rkey.
+ */
+ if (!qib_alloc_lkey(lk_table, &mr->mr))
+ goto bail;
+ mr->ibmr.lkey = mr->mr.lkey;
+ mr->ibmr.rkey = mr->mr.lkey;
+
+ atomic_set(&mr->mr.refcount, 0);
+ goto done;
+
+bail:
+ while (i)
+ kfree(mr->mr.map[--i]);
+ kfree(mr);
+ mr = NULL;
+
+done:
+ return mr;
+}
+
+/**
+ * qib_reg_phys_mr - register a physical memory region
+ * @pd: protection domain for this memory region
+ * @buffer_list: pointer to the list of physical buffers to register
+ * @num_phys_buf: the number of physical buffers to register
+ * @iova_start: the starting address passed over IB which maps to this MR
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf, int acc, u64 *iova_start)
+{
+ struct qib_mr *mr;
+ int n, m, i;
+ struct ib_mr *ret;
+
+ mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
+ if (mr == NULL) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ mr->mr.pd = pd;
+ mr->mr.user_base = *iova_start;
+ mr->mr.iova = *iova_start;
+ mr->mr.length = 0;
+ mr->mr.offset = 0;
+ mr->mr.access_flags = acc;
+ mr->umem = NULL;
+
+ m = 0;
+ n = 0;
+ for (i = 0; i < num_phys_buf; i++) {
+ mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
+ mr->mr.map[m]->segs[n].length = buffer_list[i].size;
+ mr->mr.length += buffer_list[i].size;
+ n++;
+ if (n == QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+
+ ret = &mr->ibmr;
+
+bail:
+ return ret;
+}
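The (m, n) bookkeeping above is an incremental form of two-level indexing over the map[]->segs[] arrays; a minimal user-space sketch (QIB_SEGSZ's real value lives in the driver headers, so a stand-in is assumed here):

#include <stdio.h>

#define EXAMPLE_SEGSZ 8		/* stand-in for QIB_SEGSZ, illustration only */

int main(void)
{
	int i;

	/*
	 * Buffer i lands at map[i / SEGSZ]->segs[i % SEGSZ]; the driver
	 * keeps (m, n) in step with i instead of dividing each time.
	 */
	for (i = 0; i < 20; i++)
		printf("buffer %2d -> map[%d].segs[%d]\n",
		       i, i / EXAMPLE_SEGSZ, i % EXAMPLE_SEGSZ);
	return 0;
}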
+
+/**
+ * qib_reg_user_mr - register a userspace memory region
+ * @pd: protection domain for this memory region
+ * @start: starting userspace address
+ * @length: length of region to register
+ * @virt_addr: virtual address to use (from HCA's point of view)
+ * @mr_access_flags: access flags for this memory region
+ * @udata: unused by the QLogic_IB driver
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata)
+{
+ struct qib_mr *mr;
+ struct ib_umem *umem;
+ struct ib_umem_chunk *chunk;
+ int n, m, i;
+ struct ib_mr *ret;
+
+ if (length == 0) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
+ umem = ib_umem_get(pd->uobject->context, start, length,
+ mr_access_flags, 0);
+ if (IS_ERR(umem))
+ return (void *) umem;
+
+ n = 0;
+ list_for_each_entry(chunk, &umem->chunk_list, list)
+ n += chunk->nents;
+
+ mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
+ if (!mr) {
+ ret = ERR_PTR(-ENOMEM);
+ ib_umem_release(umem);
+ goto bail;
+ }
+
+ mr->mr.pd = pd;
+ mr->mr.user_base = start;
+ mr->mr.iova = virt_addr;
+ mr->mr.length = length;
+ mr->mr.offset = umem->offset;
+ mr->mr.access_flags = mr_access_flags;
+ mr->umem = umem;
+
+ m = 0;
+ n = 0;
+ list_for_each_entry(chunk, &umem->chunk_list, list) {
+ for (i = 0; i < chunk->nents; i++) {
+ void *vaddr;
+
+ vaddr = page_address(sg_page(&chunk->page_list[i]));
+ if (!vaddr) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+ mr->mr.map[m]->segs[n].vaddr = vaddr;
+ mr->mr.map[m]->segs[n].length = umem->page_size;
+ n++;
+ if (n == QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ }
+ ret = &mr->ibmr;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_dereg_mr - unregister and free a memory region
+ * @ibmr: the memory region to free
+ *
+ * Returns 0 on success.
+ *
+ * Note that this is called to free MRs created by qib_get_dma_mr()
+ * or qib_reg_user_mr().
+ */
+int qib_dereg_mr(struct ib_mr *ibmr)
+{
+ struct qib_mr *mr = to_imr(ibmr);
+ struct qib_ibdev *dev = to_idev(ibmr->device);
+ int ret;
+ int i;
+
+ ret = qib_free_lkey(dev, &mr->mr);
+ if (ret)
+ return ret;
+
+ i = mr->mr.mapsz;
+ while (i)
+ kfree(mr->mr.map[--i]);
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+ kfree(mr);
+ return 0;
+}
+
+/*
+ * Allocate a memory region usable with the
+ * IB_WR_FAST_REG_MR send work request.
+ *
+ * Return the memory region on success, otherwise return an errno.
+ */
+struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
+{
+ struct qib_mr *mr;
+
+ mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table);
+ if (mr == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ mr->mr.pd = pd;
+ mr->mr.user_base = 0;
+ mr->mr.iova = 0;
+ mr->mr.length = 0;
+ mr->mr.offset = 0;
+ mr->mr.access_flags = 0;
+ mr->umem = NULL;
+
+ return &mr->ibmr;
+}
+
+struct ib_fast_reg_page_list *
+qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
+{
+ unsigned size = page_list_len * sizeof(u64);
+ struct ib_fast_reg_page_list *pl;
+
+ if (size > PAGE_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ pl = kmalloc(sizeof *pl, GFP_KERNEL);
+ if (!pl)
+ return ERR_PTR(-ENOMEM);
+
+ pl->page_list = kmalloc(size, GFP_KERNEL);
+ if (!pl->page_list)
+ goto err_free;
+
+ return pl;
+
+err_free:
+ kfree(pl);
+ return ERR_PTR(-ENOMEM);
+}
+
+void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
+{
+ kfree(pl->page_list);
+ kfree(pl);
+}
+
+/**
+ * qib_alloc_fmr - allocate a fast memory region
+ * @pd: the protection domain for this memory region
+ * @mr_access_flags: access flags for this memory region
+ * @fmr_attr: fast memory region attributes
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr)
+{
+ struct qib_fmr *fmr;
+ int m, i = 0;
+ struct ib_fmr *ret;
+
+ /* Allocate struct plus pointers to first level page tables. */
+ m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
+ fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
+ if (!fmr)
+ goto bail;
+
+ /* Allocate first level page tables. */
+ for (; i < m; i++) {
+ fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
+ GFP_KERNEL);
+ if (!fmr->mr.map[i])
+ goto bail;
+ }
+ fmr->mr.mapsz = m;
+
+ /*
+ * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
+ * rkey.
+ */
+ if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
+ goto bail;
+ fmr->ibfmr.rkey = fmr->mr.lkey;
+ fmr->ibfmr.lkey = fmr->mr.lkey;
+ /*
+ * Resources are allocated but no valid mapping (RKEY can't be
+ * used).
+ */
+ fmr->mr.pd = pd;
+ fmr->mr.user_base = 0;
+ fmr->mr.iova = 0;
+ fmr->mr.length = 0;
+ fmr->mr.offset = 0;
+ fmr->mr.access_flags = mr_access_flags;
+ fmr->mr.max_segs = fmr_attr->max_pages;
+ fmr->page_shift = fmr_attr->page_shift;
+
+ atomic_set(&fmr->mr.refcount, 0);
+ ret = &fmr->ibfmr;
+ goto done;
+
+bail:
+ while (i)
+ kfree(fmr->mr.map[--i]);
+ kfree(fmr);
+ ret = ERR_PTR(-ENOMEM);
+
+done:
+ return ret;
+}
+
+/**
+ * qib_map_phys_fmr - set up a fast memory region
+ * @ibfmr: the fast memory region to set up
+ * @page_list: the list of pages to associate with the fast memory region
+ * @list_len: the number of pages to associate with the fast memory region
+ * @iova: the virtual address of the start of the fast memory region
+ *
+ * This may be called from interrupt context.
+ */
+
+int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+ int list_len, u64 iova)
+{
+ struct qib_fmr *fmr = to_ifmr(ibfmr);
+ struct qib_lkey_table *rkt;
+ unsigned long flags;
+ int m, n, i;
+ u32 ps;
+ int ret;
+
+ if (atomic_read(&fmr->mr.refcount))
+ return -EBUSY;
+
+ if (list_len > fmr->mr.max_segs) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ rkt = &to_idev(ibfmr->device)->lk_table;
+ spin_lock_irqsave(&rkt->lock, flags);
+ fmr->mr.user_base = iova;
+ fmr->mr.iova = iova;
+ ps = 1 << fmr->page_shift;
+ fmr->mr.length = list_len * ps;
+ m = 0;
+ n = 0;
+ for (i = 0; i < list_len; i++) {
+ fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
+ fmr->mr.map[m]->segs[n].length = ps;
+ if (++n == QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_unmap_fmr - unmap fast memory regions
+ * @fmr_list: the list of fast memory regions to unmap
+ *
+ * Returns 0 on success.
+ */
+int qib_unmap_fmr(struct list_head *fmr_list)
+{
+ struct qib_fmr *fmr;
+ struct qib_lkey_table *rkt;
+ unsigned long flags;
+
+ list_for_each_entry(fmr, fmr_list, ibfmr.list) {
+ rkt = &to_idev(fmr->ibfmr.device)->lk_table;
+ spin_lock_irqsave(&rkt->lock, flags);
+ fmr->mr.user_base = 0;
+ fmr->mr.iova = 0;
+ fmr->mr.length = 0;
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ }
+ return 0;
+}
+
+/**
+ * qib_dealloc_fmr - deallocate a fast memory region
+ * @ibfmr: the fast memory region to deallocate
+ *
+ * Returns 0 on success.
+ */
+int qib_dealloc_fmr(struct ib_fmr *ibfmr)
+{
+ struct qib_fmr *fmr = to_ifmr(ibfmr);
+ int ret;
+ int i;
+
+ ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr);
+ if (ret)
+ return ret;
+
+ i = fmr->mr.mapsz;
+ while (i)
+ kfree(fmr->mr.map[--i]);
+ kfree(fmr);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
new file mode 100644
index 000000000000..c926bf4541df
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -0,0 +1,738 @@
+/*
+ * Copyright (c) 2008, 2009 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/aer.h>
+
+#include "qib.h"
+
+/*
+ * This file contains PCIe utility routines that are common to the
+ * various QLogic InfiniPath adapters
+ */
+
+/*
+ * Code to adjust PCIe capabilities.
+ * To minimize the change footprint, we call it
+ * from qib_pcie_params, which every chip-specific
+ * file calls, even though this violates some
+ * expectations of harmlessness.
+ */
+static int qib_tune_pcie_caps(struct qib_devdata *);
+static int qib_tune_pcie_coalesce(struct qib_devdata *);
+
+/*
+ * Do all the common PCIe setup and initialization.
+ * devdata is not yet allocated, and is not allocated until after this
+ * routine returns success. Therefore qib_dev_err() can't be used for error
+ * printing.
+ */
+int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ /*
+ * This can happen (in theory) iff:
+ * We did a chip reset, and then failed to reprogram the
+ * BAR, or the chip reset due to an internal error. We then
+ * unloaded the driver and reloaded it.
+ *
+ * Both reset cases set the BAR back to initial state. For
+ * the latter case, the AER sticky error bit at offset 0x718
+ * should be set, but the Linux kernel doesn't yet know
+ * about that, it appears. If the original BAR was retained
+ * in the kernel data structures, this may be OK.
+ */
+ qib_early_err(&pdev->dev, "pci enable failed: error %d\n",
+ -ret);
+ goto done;
+ }
+
+ ret = pci_request_regions(pdev, QIB_DRV_NAME);
+ if (ret) {
+ qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret);
+ goto bail;
+ }
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret) {
+ /*
+ * If the 64 bit setup fails, try 32 bit. Some systems
+ * do not setup 64 bit maps on systems with 2GB or less
+ * memory installed.
+ */
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret);
+ goto bail;
+ }
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ } else
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret)
+ qib_early_err(&pdev->dev,
+ "Unable to set DMA consistent mask: %d\n", ret);
+
+ pci_set_master(pdev);
+ ret = pci_enable_pcie_error_reporting(pdev);
+ if (ret)
+ qib_early_err(&pdev->dev,
+ "Unable to enable pcie error reporting: %d\n",
+ ret);
+ goto done;
+
+bail:
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+done:
+ return ret;
+}
+
+/*
+ * Do remaining PCIe setup, once dd is allocated, and save away
+ * fields required to re-initialize after a chip reset, or for
+ * various other purposes
+ */
+int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ unsigned long len;
+ resource_size_t addr;
+
+ dd->pcidev = pdev;
+ pci_set_drvdata(pdev, dd);
+
+ addr = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+
+#if defined(__powerpc__)
+ /* There isn't a generic way to specify writethrough mappings */
+ dd->kregbase = __ioremap(addr, len, _PAGE_NO_CACHE | _PAGE_WRITETHRU);
+#else
+ dd->kregbase = ioremap_nocache(addr, len);
+#endif
+
+ if (!dd->kregbase)
+ return -ENOMEM;
+
+ dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len);
+ dd->physaddr = addr; /* used for io_remap, etc. */
+
+ /*
+ * Save BARs to rewrite after device reset. Save all 64 bits of
+ * BAR, just in case.
+ */
+ dd->pcibar0 = addr;
+ dd->pcibar1 = addr >> 32;
+ dd->deviceid = ent->device; /* save for later use */
+ dd->vendorid = ent->vendor;
+
+ return 0;
+}
+
+/*
+ * Do PCIe cleanup, after chip-specific cleanup, etc. Just prior
+ * to releasing the dd memory.
+ * void, because the core PCIe cleanup calls made here are themselves void
+ */
+void qib_pcie_ddcleanup(struct qib_devdata *dd)
+{
+ u64 __iomem *base = (void __iomem *) dd->kregbase;
+
+ dd->kregbase = NULL;
+ iounmap(base);
+ if (dd->piobase)
+ iounmap(dd->piobase);
+ if (dd->userbase)
+ iounmap(dd->userbase);
+
+ pci_disable_device(dd->pcidev);
+ pci_release_regions(dd->pcidev);
+
+ pci_set_drvdata(dd->pcidev, NULL);
+}
+
+static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
+ struct msix_entry *msix_entry)
+{
+ int ret;
+ u32 tabsize = 0;
+ u16 msix_flags;
+
+ pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags);
+ tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE);
+ if (tabsize > *msixcnt)
+ tabsize = *msixcnt;
+ ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
+ if (ret > 0) {
+ tabsize = ret;
+ ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
+ }
+ if (ret) {
+ qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, "
+ "falling back to INTx\n", tabsize, ret);
+ tabsize = 0;
+ }
+ *msixcnt = tabsize;
+
+ if (ret)
+ qib_enable_intx(dd->pcidev);
+
+}
+
+/*
+ * We save the msi lo and hi values, so we can restore them after
+ * chip reset (the kernel PCI infrastructure doesn't yet handle that
+ * correctly).
+ */
+static int qib_msi_setup(struct qib_devdata *dd, int pos)
+{
+ struct pci_dev *pdev = dd->pcidev;
+ u16 control;
+ int ret;
+
+ ret = pci_enable_msi(pdev);
+ if (ret)
+ qib_dev_err(dd, "pci_enable_msi failed: %d, "
+ "interrupts may not work\n", ret);
+ /* continue even if it fails, we may still be OK... */
+
+ pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
+ &dd->msi_lo);
+ pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
+ &dd->msi_hi);
+ pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+ /* now save the data (vector) info */
+ pci_read_config_word(pdev, pos + ((control & PCI_MSI_FLAGS_64BIT)
+ ? 12 : 8),
+ &dd->msi_data);
+ return ret;
+}
+
+int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
+ struct msix_entry *entry)
+{
+ u16 linkstat, speed;
+ int pos = 0, pose, ret = 1;
+
+ pose = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+ if (!pose) {
+ qib_dev_err(dd, "Can't find PCI Express capability!\n");
+ /* set up something... */
+ dd->lbus_width = 1;
+ dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
+ goto bail;
+ }
+
+ pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSIX);
+ if (nent && *nent && pos) {
+ qib_msix_setup(dd, pos, nent, entry);
+ ret = 0; /* did it, either MSIx or INTx */
+ } else {
+ pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
+ if (pos)
+ ret = qib_msi_setup(dd, pos);
+ else
+ qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
+ }
+ if (!pos)
+ qib_enable_intx(dd->pcidev);
+
+ pci_read_config_word(dd->pcidev, pose + PCI_EXP_LNKSTA, &linkstat);
+ /*
+ * speed is bits 0-3, linkwidth is bits 4-8
+ * no defines for them in headers
+ */
+ speed = linkstat & 0xf;
+ linkstat >>= 4;
+ linkstat &= 0x1f;
+ dd->lbus_width = linkstat;
+
+ switch (speed) {
+ case 1:
+ dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
+ break;
+ case 2:
+ dd->lbus_speed = 5000; /* Gen2, 5GHz */
+ break;
+ default: /* not defined, assume gen1 */
+ dd->lbus_speed = 2500;
+ break;
+ }
+
+ /*
+ * Check against expected pcie width and complain if "wrong"
+ * on first initialization, not afterwards (i.e., reset).
+ */
+ if (minw && linkstat < minw)
+ qib_dev_err(dd,
+ "PCIe width %u (x%u HCA), performance reduced\n",
+ linkstat, minw);
+
+ qib_tune_pcie_caps(dd);
+
+ qib_tune_pcie_coalesce(dd);
+
+bail:
+ /* fill in string, even on errors */
+ snprintf(dd->lbus_info, sizeof(dd->lbus_info),
+ "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
+ return ret;
+}
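As a worked example of the link-status decoding above (illustrative only, with a hypothetical LNKSTA value): a Gen1 x8 link reports speed code 1 in bits 3:0 and width 8 starting at bit 4, which the function turns into lbus_speed = 2500 and lbus_width = 8.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t linkstat = 0x0081;		/* hypothetical LNKSTA: Gen1, x8 */
	unsigned speed = linkstat & 0xf;		/* 1 -> 2.5 GT/s */
	unsigned width = (linkstat >> 4) & 0x1f;	/* 8 lanes */

	printf("speed code %u, width x%u\n", speed, width);
	return 0;
}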
+
+/*
+ * Setup pcie interrupt stuff again after a reset. I'd like to just call
+ * pci_enable_msi() again for msi, but when I do that,
+ * the MSI enable bit doesn't get set in the command word, and
+ * we switch to a different interrupt vector, which is confusing,
+ * so I instead just do it all inline. Perhaps somehow can tie this
+ * into the PCIe hotplug support at some point
+ */
+int qib_reinit_intr(struct qib_devdata *dd)
+{
+ int pos;
+ u16 control;
+ int ret = 0;
+
+ /* If we aren't using MSI, don't restore it */
+ if (!dd->msi_lo)
+ goto bail;
+
+ pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
+ if (!pos) {
+ qib_dev_err(dd, "Can't find MSI capability, "
+ "can't restore MSI settings\n");
+ ret = 0;
+ /* nothing special for MSIx, just MSI */
+ goto bail;
+ }
+ pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
+ dd->msi_lo);
+ pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
+ dd->msi_hi);
+ pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
+ if (!(control & PCI_MSI_FLAGS_ENABLE)) {
+ control |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
+ control);
+ }
+ /* now rewrite the data (vector) info */
+ pci_write_config_word(dd->pcidev, pos +
+ ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
+ dd->msi_data);
+ ret = 1;
+bail:
+ if (!ret && (dd->flags & QIB_HAS_INTX)) {
+ qib_enable_intx(dd->pcidev);
+ ret = 1;
+ }
+
+ /* and now set the pci master bit again */
+ pci_set_master(dd->pcidev);
+
+ return ret;
+}
+
+/*
+ * Disable msi interrupt if enabled, and clear msi_lo.
+ * This is used primarily for the fallback to INTx, but
+ * is also used in reinit after reset, and during cleanup.
+ */
+void qib_nomsi(struct qib_devdata *dd)
+{
+ dd->msi_lo = 0;
+ pci_disable_msi(dd->pcidev);
+}
+
+/*
+ * Same as qib_nomsi, but for MSIx.
+ */
+void qib_nomsix(struct qib_devdata *dd)
+{
+ pci_disable_msix(dd->pcidev);
+}
+
+/*
+ * Similar to pci_intx(pdev, 1), except that we make sure
+ * msi(x) is off.
+ */
+void qib_enable_intx(struct pci_dev *pdev)
+{
+ u16 cw, new;
+ int pos;
+
+ /* first, turn on INTx */
+ pci_read_config_word(pdev, PCI_COMMAND, &cw);
+ new = cw & ~PCI_COMMAND_INTX_DISABLE;
+ if (new != cw)
+ pci_write_config_word(pdev, PCI_COMMAND, new);
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+ if (pos) {
+ /* then turn off MSI */
+ pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
+ new = cw & ~PCI_MSI_FLAGS_ENABLE;
+ if (new != cw)
+ pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
+ }
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ /* then turn off MSIx */
+ pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw);
+ new = cw & ~PCI_MSIX_FLAGS_ENABLE;
+ if (new != cw)
+ pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS, new);
+ }
+}
+
+/*
+ * These two routines are helper routines for the device reset code
+ * to move all the pcie code out of the chip-specific driver code.
+ */
+void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
+{
+ pci_read_config_word(dd->pcidev, PCI_COMMAND, cmd);
+ pci_read_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
+ pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
+}
+
+void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
+{
+ int r;
+ r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
+ dd->pcibar0);
+ if (r)
+ qib_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
+ r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
+ dd->pcibar1);
+ if (r)
+ qib_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
+ /* now re-enable memory access, and restore cosmetic settings */
+ pci_write_config_word(dd->pcidev, PCI_COMMAND, cmd);
+ pci_write_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
+ pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
+ r = pci_enable_device(dd->pcidev);
+ if (r)
+ qib_dev_err(dd, "pci_enable_device failed after "
+ "reset: %d\n", r);
+}
+
+/* code to adjust PCIe capabilities. */
+
+static int fld2val(int wd, int mask)
+{
+ int lsbmask;
+
+ if (!mask)
+ return 0;
+ wd &= mask;
+ lsbmask = mask ^ (mask & (mask - 1));
+ wd /= lsbmask;
+ return wd;
+}
+
+static int val2fld(int wd, int mask)
+{
+ int lsbmask;
+
+ if (!mask)
+ return 0;
+ lsbmask = mask ^ (mask & (mask - 1));
+ wd *= lsbmask;
+ return wd;
+}
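For illustration (not part of the patch), the two helpers restated in a stand-alone program and applied to the 3-bit Max Payload Size field at bits 7:5 of the PCIe Device Control register (mask 0x00e0); the encoded payload size is 128 << code, and the DEVCTL value used below is hypothetical:

#include <stdio.h>

/* Stand-alone restatements of fld2val()/val2fld() above, illustration only. */
static int fld2val(int wd, int mask)
{
	int lsbmask;

	if (!mask)
		return 0;
	wd &= mask;
	lsbmask = mask ^ (mask & (mask - 1));
	return wd / lsbmask;
}

static int val2fld(int wd, int mask)
{
	int lsbmask;

	if (!mask)
		return 0;
	lsbmask = mask ^ (mask & (mask - 1));
	return wd * lsbmask;
}

int main(void)
{
	int devctl = 0x0040;	/* hypothetical DEVCTL: payload code 2 */

	printf("payload code = %d (%d bytes)\n",
	       fld2val(devctl, 0x00e0), 128 << fld2val(devctl, 0x00e0));
	printf("field bits for code 3 = 0x%04x\n", val2fld(3, 0x00e0));
	return 0;
}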
+
+static int qib_pcie_coalesce;
+module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
+MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets");
+
+/*
+ * Enable PCIe completion and data coalescing, on Intel 5x00 and 7300
+ * chipsets. This is known to be unsafe for some revisions of some
+ * of these chipsets, with some BIOS settings, and enabling it on those
+ * systems may result in the system crashing, and/or data corruption.
+ */
+static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
+{
+ int r;
+ struct pci_dev *parent;
+ int ppos;
+ u16 devid;
+ u32 mask, bits, val;
+
+ if (!qib_pcie_coalesce)
+ return 0;
+
+ /* Find out supported and configured values for parent (root) */
+ parent = dd->pcidev->bus->self;
+ if (parent->bus->parent) {
+ qib_devinfo(dd->pcidev, "Parent not root\n");
+ return 1;
+ }
+ ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ if (!ppos)
+ return 1;
+ if (parent->vendor != 0x8086)
+ return 1;
+
+ /*
+ * - bit 12: Max_rdcmp_Imt_EN: need to set to 1
+ * - bit 11: COALESCE_FORCE: need to set to 0
+ * - bit 10: COALESCE_EN: need to set to 1
+ * (but there are limitations on some chipsets)
+ *
+ * On the Intel 5000, 5100, and 7300 chipsets, there is also:
+ * - bits 25:24: COALESCE_MODE, need to set to 0
+ */
+ devid = parent->device;
+ if (devid >= 0x25e2 && devid <= 0x25fa) {
+ u8 rev;
+
+ /* 5000 P/V/X/Z */
+ pci_read_config_byte(parent, PCI_REVISION_ID, &rev);
+ if (rev <= 0xb2)
+ bits = 1U << 10;
+ else
+ bits = 7U << 10;
+ mask = (3U << 24) | (7U << 10);
+ } else if (devid >= 0x65e2 && devid <= 0x65fa) {
+ /* 5100 */
+ bits = 1U << 10;
+ mask = (3U << 24) | (7U << 10);
+ } else if (devid >= 0x4021 && devid <= 0x402e) {
+ /* 5400 */
+ bits = 7U << 10;
+ mask = 7U << 10;
+ } else if (devid >= 0x3604 && devid <= 0x360a) {
+ /* 7300 */
+ bits = 7U << 10;
+ mask = (3U << 24) | (7U << 10);
+ } else {
+ /* not one of the chipsets that we know about */
+ return 1;
+ }
+ pci_read_config_dword(parent, 0x48, &val);
+ val &= ~mask;
+ val |= bits;
+ r = pci_write_config_dword(parent, 0x48, val);
+ return 0;
+}
+
+/*
+ * BIOS may not set PCIe bus-utilization parameters for best performance.
+ * Check and optionally adjust them to maximize our throughput.
+ */
+static int qib_pcie_caps;
+module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
+MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (4lsb), ReadReq (D4..7)");
+
+static int qib_tune_pcie_caps(struct qib_devdata *dd)
+{
+ int ret = 1; /* Assume the worst */
+ struct pci_dev *parent;
+ int ppos, epos;
+ u16 pcaps, pctl, ecaps, ectl;
+ int rc_sup, ep_sup;
+ int rc_cur, ep_cur;
+
+ /* Find out supported and configured values for parent (root) */
+ parent = dd->pcidev->bus->self;
+ if (parent->bus->parent) {
+ qib_devinfo(dd->pcidev, "Parent not root\n");
+ goto bail;
+ }
+ ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ if (ppos) {
+ pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps);
+ pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
+ } else
+ goto bail;
+ /* Find out supported and configured values for endpoint (us) */
+ epos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+ if (epos) {
+ pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps);
+ pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl);
+ } else
+ goto bail;
+ ret = 0;
+ /* Find max payload supported by root, endpoint */
+ rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD);
+ ep_sup = fld2val(ecaps, PCI_EXP_DEVCAP_PAYLOAD);
+ if (rc_sup > ep_sup)
+ rc_sup = ep_sup;
+
+ rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_PAYLOAD);
+ ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_PAYLOAD);
+
+ /* If the supported value is greater than the module param limit, limit it */
+ if (rc_sup > (qib_pcie_caps & 7))
+ rc_sup = qib_pcie_caps & 7;
+ /* If less than (allowed, supported), bump root payload */
+ if (rc_sup > rc_cur) {
+ rc_cur = rc_sup;
+ pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) |
+ val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD);
+ pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
+ }
+ /* If less than (allowed, supported), bump endpoint payload */
+ if (rc_sup > ep_cur) {
+ ep_cur = rc_sup;
+ ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) |
+ val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD);
+ pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
+ }
+
+ /*
+ * Now the Read Request size.
+ * No field for max supported, but PCIe spec limits it to 4096,
+ * which is code '5' (log2(4096) - 7)
+ */
+ rc_sup = 5;
+ if (rc_sup > ((qib_pcie_caps >> 4) & 7))
+ rc_sup = (qib_pcie_caps >> 4) & 7;
+ rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_READRQ);
+ ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_READRQ);
+
+ if (rc_sup > rc_cur) {
+ rc_cur = rc_sup;
+ pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) |
+ val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ);
+ pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
+ }
+ if (rc_sup > ep_cur) {
+ ep_cur = rc_sup;
+ ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) |
+ val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ);
+ pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
+ }
+bail:
+ return ret;
+}
+/* End of PCIe capability tuning */
+
+/*
+ * Code from here through the qib_pci_err_handler definition is invoked
+ * via the PCI error-handling infrastructure, registered via the PCI core.
+ */
+static pci_ers_result_t
+qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct qib_devdata *dd = pci_get_drvdata(pdev);
+ pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
+
+ switch (state) {
+ case pci_channel_io_normal:
+ qib_devinfo(pdev, "State Normal, ignoring\n");
+ break;
+
+ case pci_channel_io_frozen:
+ qib_devinfo(pdev, "State Frozen, requesting reset\n");
+ pci_disable_device(pdev);
+ ret = PCI_ERS_RESULT_NEED_RESET;
+ break;
+
+ case pci_channel_io_perm_failure:
+ qib_devinfo(pdev, "State Permanent Failure, disabling\n");
+ if (dd) {
+ /* no more register accesses! */
+ dd->flags &= ~QIB_PRESENT;
+ qib_disable_after_error(dd);
+ }
+ /* else early, or other problem */
+ ret = PCI_ERS_RESULT_DISCONNECT;
+ break;
+
+ default: /* shouldn't happen */
+ qib_devinfo(pdev, "QIB PCI errors detected (state %d)\n",
+ state);
+ break;
+ }
+ return ret;
+}
+
+static pci_ers_result_t
+qib_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ u64 words = 0U;
+ struct qib_devdata *dd = pci_get_drvdata(pdev);
+ pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
+
+ if (dd && dd->pport) {
+ words = dd->f_portcntr(dd->pport, QIBPORTCNTR_WORDRCV);
+ if (words == ~0ULL)
+ ret = PCI_ERS_RESULT_NEED_RESET;
+ }
+ qib_devinfo(pdev, "QIB mmio_enabled function called, "
+ "read wordscntr %Lx, returning %d\n", words, ret);
+ return ret;
+}
+
+static pci_ers_result_t
+qib_pci_slot_reset(struct pci_dev *pdev)
+{
+ qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static pci_ers_result_t
+qib_pci_link_reset(struct pci_dev *pdev)
+{
+ qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static void
+qib_pci_resume(struct pci_dev *pdev)
+{
+ struct qib_devdata *dd = pci_get_drvdata(pdev);
+ qib_devinfo(pdev, "QIB resume function called\n");
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ /*
+ * Running jobs will fail, since it's asynchronous
+ * unlike sysfs-requested reset. Better than
+ * doing nothing.
+ */
+ qib_init(dd, 1); /* same as re-init after reset */
+}
+
+struct pci_error_handlers qib_pci_err_handler = {
+ .error_detected = qib_pci_error_detected,
+ .mmio_enabled = qib_pci_mmio_enabled,
+ .link_reset = qib_pci_link_reset,
+ .slot_reset = qib_pci_slot_reset,
+ .resume = qib_pci_resume,
+};
diff --git a/drivers/infiniband/hw/qib/qib_pio_copy.c b/drivers/infiniband/hw/qib/qib_pio_copy.c
new file mode 100644
index 000000000000..10b8c444dd31
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_pio_copy.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2009 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "qib.h"
+
+/**
+ * qib_pio_copy - copy data to MMIO space, in multiples of 32-bits
+ * @to: destination, in MMIO space (must be 64-bit aligned)
+ * @from: source (must be 64-bit aligned)
+ * @count: number of 32-bit quantities to copy
+ *
+ * Copy data from kernel space to MMIO space, in multiples of 32 bits at a
+ * time. Order of access is not guaranteed, nor is a memory barrier
+ * performed afterwards.
+ */
+void qib_pio_copy(void __iomem *to, const void *from, size_t count)
+{
+#ifdef CONFIG_64BIT
+ u64 __iomem *dst = to;
+ const u64 *src = from;
+ const u64 *end = src + (count >> 1);
+
+ while (src < end)
+ __raw_writeq(*src++, dst++);
+ if (count & 1)
+ __raw_writel(*(const u32 *)src, dst);
+#else
+ u32 __iomem *dst = to;
+ const u32 *src = from;
+ const u32 *end = src + count;
+
+ while (src < end)
+ __raw_writel(*src++, dst++);
+#endif
+}
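A hypothetical caller (sketch only, not from the patch): since @count is in 32-bit words, a byte length is divided by sizeof(u32) before the call, and both buffers are assumed to honour the 64-bit alignment noted in the kernel-doc above.

/* Hypothetical helper, illustration only; compiles only in the driver context. */
static void example_copy_header(void __iomem *piobuf, const void *hdr,
				unsigned hdrbytes)
{
	/* hdrbytes is assumed to be a multiple of 4 and hdr 64-bit aligned. */
	qib_pio_copy(piobuf, hdr, hdrbytes >> 2);
}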
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
new file mode 100644
index 000000000000..e0f65e39076b
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -0,0 +1,1255 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
+#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
+
+static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
+ struct qpn_map *map, unsigned off)
+{
+ return (map - qpt->map) * BITS_PER_PAGE + off;
+}
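A worked example of the mapping above (illustrative only, assuming 4 KiB pages): each qpn_map page covers PAGE_SIZE * BITS_PER_BYTE = 32768 QPNs, so bit 10 of map[1] corresponds to QPN 32778, and free_qpn() recovers the pair with qpn / BITS_PER_PAGE and qpn & BITS_PER_PAGE_MASK.

#include <stdio.h>

#define EXAMPLE_PAGE_BITS (4096 * 8)	/* BITS_PER_PAGE with 4 KiB pages */

int main(void)
{
	unsigned map_idx = 1, off = 10;
	unsigned qpn = map_idx * EXAMPLE_PAGE_BITS + off;	/* what mk_qpn() computes */

	printf("QPN %u -> map[%u], bit %u\n", qpn,
	       qpn / EXAMPLE_PAGE_BITS, qpn % EXAMPLE_PAGE_BITS);
	return 0;
}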
+
+static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
+ struct qpn_map *map, unsigned off,
+ unsigned r)
+{
+ if (qpt->mask) {
+ off++;
+ if ((off & qpt->mask) >> 1 != r)
+ off = ((off & qpt->mask) ?
+ (off | qpt->mask) + 1 : off) | (r << 1);
+ } else
+ off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
+ return off;
+}
+
+/*
+ * Convert the AETH credit code into the number of credits.
+ */
+static u32 credit_table[31] = {
+ 0, /* 0 */
+ 1, /* 1 */
+ 2, /* 2 */
+ 3, /* 3 */
+ 4, /* 4 */
+ 6, /* 5 */
+ 8, /* 6 */
+ 12, /* 7 */
+ 16, /* 8 */
+ 24, /* 9 */
+ 32, /* A */
+ 48, /* B */
+ 64, /* C */
+ 96, /* D */
+ 128, /* E */
+ 192, /* F */
+ 256, /* 10 */
+ 384, /* 11 */
+ 512, /* 12 */
+ 768, /* 13 */
+ 1024, /* 14 */
+ 1536, /* 15 */
+ 2048, /* 16 */
+ 3072, /* 17 */
+ 4096, /* 18 */
+ 6144, /* 19 */
+ 8192, /* 1A */
+ 12288, /* 1B */
+ 16384, /* 1C */
+ 24576, /* 1D */
+ 32768 /* 1E */
+};
+
+static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
+{
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
+
+ /*
+ * Free the page if someone raced with us installing it.
+ */
+
+ spin_lock(&qpt->lock);
+ if (map->page)
+ free_page(page);
+ else
+ map->page = (void *)page;
+ spin_unlock(&qpt->lock);
+}
+
+/*
+ * Allocate the next available QPN or
+ * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
+ */
+static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
+ enum ib_qp_type type, u8 port)
+{
+ u32 i, offset, max_scan, qpn;
+ struct qpn_map *map;
+ u32 ret;
+ int r;
+
+ if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
+ unsigned n;
+
+ ret = type == IB_QPT_GSI;
+ n = 1 << (ret + 2 * (port - 1));
+ spin_lock(&qpt->lock);
+ if (qpt->flags & n)
+ ret = -EINVAL;
+ else
+ qpt->flags |= n;
+ spin_unlock(&qpt->lock);
+ goto bail;
+ }
+
+ r = smp_processor_id();
+ if (r >= dd->n_krcv_queues)
+ r %= dd->n_krcv_queues;
+ qpn = qpt->last + 1;
+ if (qpn >= QPN_MAX)
+ qpn = 2;
+ if (qpt->mask && ((qpn & qpt->mask) >> 1) != r)
+ qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) |
+ (r << 1);
+ offset = qpn & BITS_PER_PAGE_MASK;
+ map = &qpt->map[qpn / BITS_PER_PAGE];
+ max_scan = qpt->nmaps - !offset;
+ for (i = 0;;) {
+ if (unlikely(!map->page)) {
+ get_map_page(qpt, map);
+ if (unlikely(!map->page))
+ break;
+ }
+ do {
+ if (!test_and_set_bit(offset, map->page)) {
+ qpt->last = qpn;
+ ret = qpn;
+ goto bail;
+ }
+ offset = find_next_offset(qpt, map, offset, r);
+ qpn = mk_qpn(qpt, map, offset);
+ /*
+ * This test differs from alloc_pidmap().
+ * If find_next_offset() does find a zero
+ * bit, we don't need to check for QPN
+ * wrapping around past our starting QPN.
+ * We just need to be sure we don't loop
+ * forever.
+ */
+ } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
+ /*
+ * In order to keep the number of pages allocated to a
+ * minimum, we scan all the existing pages before increasing
+ * the size of the bitmap table.
+ */
+ if (++i > max_scan) {
+ if (qpt->nmaps == QPNMAP_ENTRIES)
+ break;
+ map = &qpt->map[qpt->nmaps++];
+ offset = qpt->mask ? (r << 1) : 0;
+ } else if (map < &qpt->map[qpt->nmaps]) {
+ ++map;
+ offset = qpt->mask ? (r << 1) : 0;
+ } else {
+ map = &qpt->map[0];
+ offset = qpt->mask ? (r << 1) : 2;
+ }
+ qpn = mk_qpn(qpt, map, offset);
+ }
+
+ ret = -ENOMEM;
+
+bail:
+ return ret;
+}
+
+static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
+{
+ struct qpn_map *map;
+
+ map = qpt->map + qpn / BITS_PER_PAGE;
+ if (map->page)
+ clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
+}
+
+/*
+ * Put the QP into the hash table.
+ * The hash table holds a reference to the QP.
+ */
+static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
+{
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ unsigned n = qp->ibqp.qp_num % dev->qp_table_size;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->qpt_lock, flags);
+
+ if (qp->ibqp.qp_num == 0)
+ ibp->qp0 = qp;
+ else if (qp->ibqp.qp_num == 1)
+ ibp->qp1 = qp;
+ else {
+ qp->next = dev->qp_table[n];
+ dev->qp_table[n] = qp;
+ }
+ atomic_inc(&qp->refcount);
+
+ spin_unlock_irqrestore(&dev->qpt_lock, flags);
+}
+
+/*
+ * Remove the QP from the table so it can't be found asynchronously by
+ * the receive interrupt routine.
+ */
+static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
+{
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_qp *q, **qpp;
+ unsigned long flags;
+
+ qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size];
+
+ spin_lock_irqsave(&dev->qpt_lock, flags);
+
+ if (ibp->qp0 == qp) {
+ ibp->qp0 = NULL;
+ atomic_dec(&qp->refcount);
+ } else if (ibp->qp1 == qp) {
+ ibp->qp1 = NULL;
+ atomic_dec(&qp->refcount);
+ } else
+ for (; (q = *qpp) != NULL; qpp = &q->next)
+ if (q == qp) {
+ *qpp = qp->next;
+ qp->next = NULL;
+ atomic_dec(&qp->refcount);
+ break;
+ }
+
+ spin_unlock_irqrestore(&dev->qpt_lock, flags);
+}
+
+/**
+ * qib_free_all_qps - check for QPs still in use
+ * @dd: the qlogic_ib device
+ *
+ * There should not be any QPs still in use.
+ * Returns the number of QPs still in use.
+ */
+unsigned qib_free_all_qps(struct qib_devdata *dd)
+{
+ struct qib_ibdev *dev = &dd->verbs_dev;
+ unsigned long flags;
+ struct qib_qp *qp;
+ unsigned n, qp_inuse = 0;
+
+ for (n = 0; n < dd->num_pports; n++) {
+ struct qib_ibport *ibp = &dd->pport[n].ibport_data;
+
+ if (!qib_mcast_tree_empty(ibp))
+ qp_inuse++;
+ if (ibp->qp0)
+ qp_inuse++;
+ if (ibp->qp1)
+ qp_inuse++;
+ }
+
+ spin_lock_irqsave(&dev->qpt_lock, flags);
+ for (n = 0; n < dev->qp_table_size; n++) {
+ qp = dev->qp_table[n];
+ dev->qp_table[n] = NULL;
+
+ for (; qp; qp = qp->next)
+ qp_inuse++;
+ }
+ spin_unlock_irqrestore(&dev->qpt_lock, flags);
+
+ return qp_inuse;
+}
+
+/**
+ * qib_lookup_qpn - return the QP with the given QPN
+ * @ibp: the IB port to search
+ * @qpn: the QP number to look up
+ *
+ * The caller is responsible for decrementing the QP reference count
+ * when done.
+ */
+struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
+{
+ struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
+ unsigned long flags;
+ struct qib_qp *qp;
+
+ spin_lock_irqsave(&dev->qpt_lock, flags);
+
+ if (qpn == 0)
+ qp = ibp->qp0;
+ else if (qpn == 1)
+ qp = ibp->qp1;
+ else
+ for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp;
+ qp = qp->next)
+ if (qp->ibqp.qp_num == qpn)
+ break;
+ if (qp)
+ atomic_inc(&qp->refcount);
+
+ spin_unlock_irqrestore(&dev->qpt_lock, flags);
+ return qp;
+}
+
+/**
+ * qib_reset_qp - initialize the QP state to the reset state
+ * @qp: the QP to reset
+ * @type: the QP type
+ */
+static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
+{
+ qp->remote_qpn = 0;
+ qp->qkey = 0;
+ qp->qp_access_flags = 0;
+ atomic_set(&qp->s_dma_busy, 0);
+ qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
+ qp->s_hdrwords = 0;
+ qp->s_wqe = NULL;
+ qp->s_draining = 0;
+ qp->s_next_psn = 0;
+ qp->s_last_psn = 0;
+ qp->s_sending_psn = 0;
+ qp->s_sending_hpsn = 0;
+ qp->s_psn = 0;
+ qp->r_psn = 0;
+ qp->r_msn = 0;
+ if (type == IB_QPT_RC) {
+ qp->s_state = IB_OPCODE_RC_SEND_LAST;
+ qp->r_state = IB_OPCODE_RC_SEND_LAST;
+ } else {
+ qp->s_state = IB_OPCODE_UC_SEND_LAST;
+ qp->r_state = IB_OPCODE_UC_SEND_LAST;
+ }
+ qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+ qp->r_nak_state = 0;
+ qp->r_aflags = 0;
+ qp->r_flags = 0;
+ qp->s_head = 0;
+ qp->s_tail = 0;
+ qp->s_cur = 0;
+ qp->s_acked = 0;
+ qp->s_last = 0;
+ qp->s_ssn = 1;
+ qp->s_lsn = 0;
+ qp->s_mig_state = IB_MIG_MIGRATED;
+ memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
+ qp->r_head_ack_queue = 0;
+ qp->s_tail_ack_queue = 0;
+ qp->s_num_rd_atomic = 0;
+ if (qp->r_rq.wq) {
+ qp->r_rq.wq->head = 0;
+ qp->r_rq.wq->tail = 0;
+ }
+ qp->r_sge.num_sge = 0;
+}
+
+static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
+{
+ unsigned n;
+
+ if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+ while (qp->s_rdma_read_sge.num_sge) {
+ atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
+ if (--qp->s_rdma_read_sge.num_sge)
+ qp->s_rdma_read_sge.sge =
+ *qp->s_rdma_read_sge.sg_list++;
+ }
+
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+
+ if (clr_sends) {
+ while (qp->s_last != qp->s_head) {
+ struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
+ unsigned i;
+
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct qib_sge *sge = &wqe->sg_list[i];
+
+ atomic_dec(&sge->mr->refcount);
+ }
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ }
+ if (qp->s_rdma_mr) {
+ atomic_dec(&qp->s_rdma_mr->refcount);
+ qp->s_rdma_mr = NULL;
+ }
+ }
+
+ if (qp->ibqp.qp_type != IB_QPT_RC)
+ return;
+
+ for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
+ struct qib_ack_entry *e = &qp->s_ack_queue[n];
+
+ if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
+ e->rdma_sge.mr) {
+ atomic_dec(&e->rdma_sge.mr->refcount);
+ e->rdma_sge.mr = NULL;
+ }
+ }
+}
+
+/**
+ * qib_error_qp - put a QP into the error state
+ * @qp: the QP to put into the error state
+ * @err: the receive completion error to signal if a RWQE is active
+ *
+ * Flushes both send and receive work queues.
+ * Returns true if last WQE event should be generated.
+ * The QP s_lock should be held and interrupts disabled.
+ * If we are already in error state, just return.
+ */
+int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
+{
+ struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+ struct ib_wc wc;
+ int ret = 0;
+
+ if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
+ goto bail;
+
+ qp->state = IB_QPS_ERR;
+
+ if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
+ qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+ del_timer(&qp->s_timer);
+ }
+ spin_lock(&dev->pending_lock);
+ if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
+ qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
+ list_del_init(&qp->iowait);
+ }
+ spin_unlock(&dev->pending_lock);
+
+ if (!(qp->s_flags & QIB_S_BUSY)) {
+ qp->s_hdrwords = 0;
+ if (qp->s_rdma_mr) {
+ atomic_dec(&qp->s_rdma_mr->refcount);
+ qp->s_rdma_mr = NULL;
+ }
+ if (qp->s_tx) {
+ qib_put_txreq(qp->s_tx);
+ qp->s_tx = NULL;
+ }
+ }
+
+ /* Schedule the sending tasklet to drain the send work queue. */
+ if (qp->s_last != qp->s_head)
+ qib_schedule_send(qp);
+
+ clear_mr_refs(qp, 0);
+
+ memset(&wc, 0, sizeof(wc));
+ wc.qp = &qp->ibqp;
+ wc.opcode = IB_WC_RECV;
+
+ if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
+ wc.wr_id = qp->r_wr_id;
+ wc.status = err;
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ }
+ wc.status = IB_WC_WR_FLUSH_ERR;
+
+ if (qp->r_rq.wq) {
+ struct qib_rwq *wq;
+ u32 head;
+ u32 tail;
+
+ spin_lock(&qp->r_rq.lock);
+
+ /* sanity check pointers before trusting them */
+ wq = qp->r_rq.wq;
+ head = wq->head;
+ if (head >= qp->r_rq.size)
+ head = 0;
+ tail = wq->tail;
+ if (tail >= qp->r_rq.size)
+ tail = 0;
+ while (tail != head) {
+ wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
+ if (++tail >= qp->r_rq.size)
+ tail = 0;
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ }
+ wq->tail = tail;
+
+ spin_unlock(&qp->r_rq.lock);
+ } else if (qp->ibqp.event_handler)
+ ret = 1;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_modify_qp - modify the attributes of a queue pair
+ * @ibqp: the queue pair whose attributes we're modifying
+ * @attr: the new attributes
+ * @attr_mask: the mask of attributes to modify
+ * @udata: user data for libibverbs.so
+ *
+ * Returns 0 on success, otherwise returns an errno.
+ */
+int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct qib_ibdev *dev = to_idev(ibqp->device);
+ struct qib_qp *qp = to_iqp(ibqp);
+ enum ib_qp_state cur_state, new_state;
+ struct ib_event ev;
+ int lastwqe = 0;
+ int mig = 0;
+ int ret;
+ u32 pmtu = 0; /* for gcc warning only */
+
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_lock);
+
+ cur_state = attr_mask & IB_QP_CUR_STATE ?
+ attr->cur_qp_state : qp->state;
+ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask))
+ goto inval;
+
+ if (attr_mask & IB_QP_AV) {
+ if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
+ goto inval;
+ if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
+ goto inval;
+ if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
+ goto inval;
+ if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
+ goto inval;
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
+ if (attr->min_rnr_timer > 31)
+ goto inval;
+
+ if (attr_mask & IB_QP_PORT)
+ if (qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI ||
+ attr->port_num == 0 ||
+ attr->port_num > ibqp->device->phys_port_cnt)
+ goto inval;
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ if (attr->dest_qp_num > QIB_QPN_MASK)
+ goto inval;
+
+ if (attr_mask & IB_QP_RETRY_CNT)
+ if (attr->retry_cnt > 7)
+ goto inval;
+
+ if (attr_mask & IB_QP_RNR_RETRY)
+ if (attr->rnr_retry > 7)
+ goto inval;
+
+ /*
+ * Don't allow invalid path_mtu values. It is OK to set it greater
+ * than the active MTU (or even max_cap, if we have tuned that to a
+ * small MTU); we'll set qp->path_mtu to the lesser of the requested
+ * attribute MTU and the active MTU, for packetizing messages.
+ * Note that the QP port has to be set in INIT and MTU in RTR.
+ */
+ if (attr_mask & IB_QP_PATH_MTU) {
+ struct qib_devdata *dd = dd_from_dev(dev);
+ int mtu, pidx = qp->port_num - 1;
+
+ mtu = ib_mtu_enum_to_int(attr->path_mtu);
+ if (mtu == -1)
+ goto inval;
+ if (mtu > dd->pport[pidx].ibmtu) {
+ switch (dd->pport[pidx].ibmtu) {
+ case 4096:
+ pmtu = IB_MTU_4096;
+ break;
+ case 2048:
+ pmtu = IB_MTU_2048;
+ break;
+ case 1024:
+ pmtu = IB_MTU_1024;
+ break;
+ case 512:
+ pmtu = IB_MTU_512;
+ break;
+ case 256:
+ pmtu = IB_MTU_256;
+ break;
+ default:
+ pmtu = IB_MTU_2048;
+ }
+ } else
+ pmtu = attr->path_mtu;
+ }
+
+ if (attr_mask & IB_QP_PATH_MIG_STATE) {
+ if (attr->path_mig_state == IB_MIG_REARM) {
+ if (qp->s_mig_state == IB_MIG_ARMED)
+ goto inval;
+ if (new_state != IB_QPS_RTS)
+ goto inval;
+ } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
+ if (qp->s_mig_state == IB_MIG_REARM)
+ goto inval;
+ if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
+ goto inval;
+ if (qp->s_mig_state == IB_MIG_ARMED)
+ mig = 1;
+ } else
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
+ goto inval;
+
+ switch (new_state) {
+ case IB_QPS_RESET:
+ if (qp->state != IB_QPS_RESET) {
+ qp->state = IB_QPS_RESET;
+ spin_lock(&dev->pending_lock);
+ if (!list_empty(&qp->iowait))
+ list_del_init(&qp->iowait);
+ spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irq(&qp->r_lock);
+ /* Stop the sending work queue and retry timer */
+ cancel_work_sync(&qp->s_work);
+ del_timer_sync(&qp->s_timer);
+ wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
+ if (qp->s_tx) {
+ qib_put_txreq(qp->s_tx);
+ qp->s_tx = NULL;
+ }
+ remove_qp(dev, qp);
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_lock);
+ clear_mr_refs(qp, 1);
+ qib_reset_qp(qp, ibqp->qp_type);
+ }
+ break;
+
+ case IB_QPS_RTR:
+ /* Allow event to retrigger if QP set to RTR more than once */
+ qp->r_flags &= ~QIB_R_COMM_EST;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_SQD:
+ qp->s_draining = qp->s_last != qp->s_cur;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_SQE:
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ goto inval;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_ERR:
+ lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ break;
+
+ default:
+ qp->state = new_state;
+ break;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ qp->s_pkey_index = attr->pkey_index;
+
+ if (attr_mask & IB_QP_PORT)
+ qp->port_num = attr->port_num;
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ qp->remote_qpn = attr->dest_qp_num;
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
+ qp->s_psn = qp->s_next_psn;
+ qp->s_sending_psn = qp->s_next_psn;
+ qp->s_last_psn = qp->s_next_psn - 1;
+ qp->s_sending_hpsn = qp->s_last_psn;
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN)
+ qp->r_psn = attr->rq_psn & QIB_PSN_MASK;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ qp->qp_access_flags = attr->qp_access_flags;
+
+ if (attr_mask & IB_QP_AV) {
+ qp->remote_ah_attr = attr->ah_attr;
+ qp->s_srate = attr->ah_attr.static_rate;
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ qp->alt_ah_attr = attr->alt_ah_attr;
+ qp->s_alt_pkey_index = attr->alt_pkey_index;
+ }
+
+ if (attr_mask & IB_QP_PATH_MIG_STATE) {
+ qp->s_mig_state = attr->path_mig_state;
+ if (mig) {
+ qp->remote_ah_attr = qp->alt_ah_attr;
+ qp->port_num = qp->alt_ah_attr.port_num;
+ qp->s_pkey_index = qp->s_alt_pkey_index;
+ }
+ }
+
+ if (attr_mask & IB_QP_PATH_MTU)
+ qp->path_mtu = pmtu;
+
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ qp->s_retry_cnt = attr->retry_cnt;
+ qp->s_retry = attr->retry_cnt;
+ }
+
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ qp->s_rnr_retry_cnt = attr->rnr_retry;
+ qp->s_rnr_retry = attr->rnr_retry;
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
+ qp->r_min_rnr_timer = attr->min_rnr_timer;
+
+ if (attr_mask & IB_QP_TIMEOUT)
+ qp->timeout = attr->timeout;
+
+ if (attr_mask & IB_QP_QKEY)
+ qp->qkey = attr->qkey;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+ qp->s_max_rd_atomic = attr->max_rd_atomic;
+
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irq(&qp->r_lock);
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ insert_qp(dev, qp);
+
+ if (lastwqe) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ if (mig) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_PATH_MIG;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ ret = 0;
+ goto bail;
+
+inval:
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irq(&qp->r_lock);
+ ret = -EINVAL;
+
+bail:
+ return ret;
+}
+
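+/**
+ * qib_query_qp - query the attributes of a queue pair
+ * @ibqp: the queue pair to query
+ * @attr: filled in with the QP's current attributes
+ * @attr_mask: mask of attributes to query (every field below is filled in
+ * regardless of the mask)
+ * @init_attr: filled in with the attributes the QP was created with
+ *
+ * Always returns 0.
+ */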
+int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+
+ attr->qp_state = qp->state;
+ attr->cur_qp_state = attr->qp_state;
+ attr->path_mtu = qp->path_mtu;
+ attr->path_mig_state = qp->s_mig_state;
+ attr->qkey = qp->qkey;
+ attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
+ attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
+ attr->dest_qp_num = qp->remote_qpn;
+ attr->qp_access_flags = qp->qp_access_flags;
+ attr->cap.max_send_wr = qp->s_size - 1;
+ attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
+ attr->cap.max_send_sge = qp->s_max_sge;
+ attr->cap.max_recv_sge = qp->r_rq.max_sge;
+ attr->cap.max_inline_data = 0;
+ attr->ah_attr = qp->remote_ah_attr;
+ attr->alt_ah_attr = qp->alt_ah_attr;
+ attr->pkey_index = qp->s_pkey_index;
+ attr->alt_pkey_index = qp->s_alt_pkey_index;
+ attr->en_sqd_async_notify = 0;
+ attr->sq_draining = qp->s_draining;
+ attr->max_rd_atomic = qp->s_max_rd_atomic;
+ attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
+ attr->min_rnr_timer = qp->r_min_rnr_timer;
+ attr->port_num = qp->port_num;
+ attr->timeout = qp->timeout;
+ attr->retry_cnt = qp->s_retry_cnt;
+ attr->rnr_retry = qp->s_rnr_retry_cnt;
+ attr->alt_port_num = qp->alt_ah_attr.port_num;
+ attr->alt_timeout = qp->alt_timeout;
+
+ init_attr->event_handler = qp->ibqp.event_handler;
+ init_attr->qp_context = qp->ibqp.qp_context;
+ init_attr->send_cq = qp->ibqp.send_cq;
+ init_attr->recv_cq = qp->ibqp.recv_cq;
+ init_attr->srq = qp->ibqp.srq;
+ init_attr->cap = attr->cap;
+ if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
+ init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
+ else
+ init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
+ init_attr->qp_type = qp->ibqp.qp_type;
+ init_attr->port_num = qp->port_num;
+ return 0;
+}
+
+/**
+ * qib_compute_aeth - compute the AETH (syndrome + MSN)
+ * @qp: the queue pair to compute the AETH for
+ *
+ * Returns the AETH.
+ */
+__be32 qib_compute_aeth(struct qib_qp *qp)
+{
+ u32 aeth = qp->r_msn & QIB_MSN_MASK;
+
+ if (qp->ibqp.srq) {
+ /*
+ * Shared receive queues don't generate credits.
+ * Set the credit field to the invalid value.
+ */
+ aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
+ } else {
+ u32 min, max, x;
+ u32 credits;
+ struct qib_rwq *wq = qp->r_rq.wq;
+ u32 head;
+ u32 tail;
+
+ /* sanity check pointers before trusting them */
+ head = wq->head;
+ if (head >= qp->r_rq.size)
+ head = 0;
+ tail = wq->tail;
+ if (tail >= qp->r_rq.size)
+ tail = 0;
+ /*
+ * Compute the number of credits available (RWQEs).
+ * XXX Not holding the r_rq.lock here, so there is a small
+ * chance that the pair of reads is not atomic.
+ */
+ credits = head - tail;
+ if ((int)credits < 0)
+ credits += qp->r_rq.size;
+ /*
+ * Binary search the credit table to find the code to
+ * use.
+ */
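+ /*
+ * Assuming credit_table[] (defined elsewhere in the driver) is
+ * monotonically nondecreasing, this search converges on the largest
+ * code whose table entry does not exceed the available RWQE count,
+ * so the advertised credit never overstates what the receive queue
+ * can actually hold.
+ */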
+ min = 0;
+ max = 31;
+ for (;;) {
+ x = (min + max) / 2;
+ if (credit_table[x] == credits)
+ break;
+ if (credit_table[x] > credits)
+ max = x;
+ else if (min == x)
+ break;
+ else
+ min = x;
+ }
+ aeth |= x << QIB_AETH_CREDIT_SHIFT;
+ }
+ return cpu_to_be32(aeth);
+}
+
+/**
+ * qib_create_qp - create a queue pair for a device
+ * @ibpd: the protection domain whose device we create the queue pair for
+ * @init_attr: the attributes of the queue pair
+ * @udata: user data for libibverbs.so
+ *
+ * Returns the queue pair on success, otherwise returns an errno.
+ *
+ * Called by the ib_create_qp() core verbs function.
+ */
+struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct qib_qp *qp;
+ int err;
+ struct qib_swqe *swq = NULL;
+ struct qib_ibdev *dev;
+ struct qib_devdata *dd;
+ size_t sz;
+ size_t sg_list_sz;
+ struct ib_qp *ret;
+
+ if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
+ init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
+ /* Check receive queue parameters if no SRQ is specified. */
+ if (!init_attr->srq) {
+ if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
+ init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+ if (init_attr->cap.max_send_sge +
+ init_attr->cap.max_send_wr +
+ init_attr->cap.max_recv_sge +
+ init_attr->cap.max_recv_wr == 0) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+ }
+
+ switch (init_attr->qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ if (init_attr->port_num == 0 ||
+ init_attr->port_num > ibpd->device->phys_port_cnt) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
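+ /* FALLTHROUGH */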
+ case IB_QPT_UC:
+ case IB_QPT_RC:
+ case IB_QPT_UD:
+ sz = sizeof(struct qib_sge) *
+ init_attr->cap.max_send_sge +
+ sizeof(struct qib_swqe);
+ swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
+ if (swq == NULL) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+ sz = sizeof(*qp);
+ sg_list_sz = 0;
+ if (init_attr->srq) {
+ struct qib_srq *srq = to_isrq(init_attr->srq);
+
+ if (srq->rq.max_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (srq->rq.max_sge - 1);
+ } else if (init_attr->cap.max_recv_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (init_attr->cap.max_recv_sge - 1);
+ qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
+ if (!qp) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_swq;
+ }
+ if (init_attr->srq)
+ sz = 0;
+ else {
+ qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
+ qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+ sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
+ sizeof(struct qib_rwqe);
+ qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
+ qp->r_rq.size * sz);
+ if (!qp->r_rq.wq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_qp;
+ }
+ }
+
+ /*
+ * ib_create_qp() will initialize qp->ibqp
+ * except for qp->ibqp.qp_num.
+ */
+ spin_lock_init(&qp->r_lock);
+ spin_lock_init(&qp->s_lock);
+ spin_lock_init(&qp->r_rq.lock);
+ atomic_set(&qp->refcount, 0);
+ init_waitqueue_head(&qp->wait);
+ init_waitqueue_head(&qp->wait_dma);
+ init_timer(&qp->s_timer);
+ qp->s_timer.data = (unsigned long)qp;
+ INIT_WORK(&qp->s_work, qib_do_send);
+ INIT_LIST_HEAD(&qp->iowait);
+ INIT_LIST_HEAD(&qp->rspwait);
+ qp->state = IB_QPS_RESET;
+ qp->s_wq = swq;
+ qp->s_size = init_attr->cap.max_send_wr + 1;
+ qp->s_max_sge = init_attr->cap.max_send_sge;
+ if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
+ qp->s_flags = QIB_S_SIGNAL_REQ_WR;
+ dev = to_idev(ibpd->device);
+ dd = dd_from_dev(dev);
+ err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
+ init_attr->port_num);
+ if (err < 0) {
+ ret = ERR_PTR(err);
+ vfree(qp->r_rq.wq);
+ goto bail_qp;
+ }
+ qp->ibqp.qp_num = err;
+ qp->port_num = init_attr->port_num;
+ qp->processor_id = smp_processor_id();
+ qib_reset_qp(qp, init_attr->qp_type);
+ break;
+
+ default:
+ /* Don't support raw QPs */
+ ret = ERR_PTR(-ENOSYS);
+ goto bail;
+ }
+
+ init_attr->cap.max_inline_data = 0;
+
+ /*
+ * Return the address of the RWQ as the offset to mmap.
+ * See qib_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ if (!qp->r_rq.wq) {
+ __u64 offset = 0;
+
+ err = ib_copy_to_udata(udata, &offset,
+ sizeof(offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ } else {
+ u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;
+
+ qp->ip = qib_create_mmap_info(dev, s,
+ ibpd->uobject->context,
+ qp->r_rq.wq);
+ if (!qp->ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ err = ib_copy_to_udata(udata, &(qp->ip->offset),
+ sizeof(qp->ip->offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ }
+ }
+
+ spin_lock(&dev->n_qps_lock);
+ if (dev->n_qps_allocated == ib_qib_max_qps) {
+ spin_unlock(&dev->n_qps_lock);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ dev->n_qps_allocated++;
+ spin_unlock(&dev->n_qps_lock);
+
+ if (qp->ip) {
+ spin_lock_irq(&dev->pending_lock);
+ list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
+ ret = &qp->ibqp;
+ goto bail;
+
+bail_ip:
+ if (qp->ip)
+ kref_put(&qp->ip->ref, qib_release_mmap_info);
+ else
+ vfree(qp->r_rq.wq);
+ free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
+bail_qp:
+ kfree(qp);
+bail_swq:
+ vfree(swq);
+bail:
+ return ret;
+}
+
+/**
+ * qib_destroy_qp - destroy a queue pair
+ * @ibqp: the queue pair to destroy
+ *
+ * Returns 0 on success.
+ *
+ * Note that this can be called while the QP is actively sending or
+ * receiving!
+ */
+int qib_destroy_qp(struct ib_qp *ibqp)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+ struct qib_ibdev *dev = to_idev(ibqp->device);
+
+ /* Make sure HW and driver activity is stopped. */
+ spin_lock_irq(&qp->s_lock);
+ if (qp->state != IB_QPS_RESET) {
+ qp->state = IB_QPS_RESET;
+ spin_lock(&dev->pending_lock);
+ if (!list_empty(&qp->iowait))
+ list_del_init(&qp->iowait);
+ spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
+ spin_unlock_irq(&qp->s_lock);
+ cancel_work_sync(&qp->s_work);
+ del_timer_sync(&qp->s_timer);
+ wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
+ if (qp->s_tx) {
+ qib_put_txreq(qp->s_tx);
+ qp->s_tx = NULL;
+ }
+ remove_qp(dev, qp);
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
+ clear_mr_refs(qp, 1);
+ } else
+ spin_unlock_irq(&qp->s_lock);
+
+ /* all users cleaned up, mark it available */
+ free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
+ spin_lock(&dev->n_qps_lock);
+ dev->n_qps_allocated--;
+ spin_unlock(&dev->n_qps_lock);
+
+ if (qp->ip)
+ kref_put(&qp->ip->ref, qib_release_mmap_info);
+ else
+ vfree(qp->r_rq.wq);
+ vfree(qp->s_wq);
+ kfree(qp);
+ return 0;
+}
+
+/**
+ * qib_init_qpn_table - initialize the QP number table for a device
+ * @qpt: the QPN table
+ */
+void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
+{
+ spin_lock_init(&qpt->lock);
+ qpt->last = 1; /* start with QPN 2 */
+ qpt->nmaps = 1;
+ qpt->mask = dd->qpn_mask;
+}
+
+/**
+ * qib_free_qpn_table - free the QP number table for a device
+ * @qpt: the QPN table
+ */
+void qib_free_qpn_table(struct qib_qpn_table *qpt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
+ if (qpt->map[i].page)
+ free_page((unsigned long) qpt->map[i].page);
+}
+
+/**
+ * qib_get_credit - flush the send work queue of a QP
+ * @qp: the qp whose send work queue to flush
+ * @aeth: the Acknowledge Extended Transport Header
+ *
+ * The QP s_lock should be held.
+ */
+void qib_get_credit(struct qib_qp *qp, u32 aeth)
+{
+ u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
+
+ /*
+ * If the credit is invalid, we can send
+ * as many packets as we like. Otherwise, we have to
+ * honor the credit field.
+ */
+ if (credit == QIB_AETH_CREDIT_INVAL) {
+ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
+ qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
+ if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
+ qib_schedule_send(qp);
+ }
+ }
+ } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
+ /* Compute new LSN (i.e., MSN + credit) */
+ credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
+ if (qib_cmp24(credit, qp->s_lsn) > 0) {
+ qp->s_lsn = credit;
+ if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
+ qib_schedule_send(qp);
+ }
+ }
+ }
+}
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
new file mode 100644
index 000000000000..35b3604b691d
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -0,0 +1,564 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+#include "qib_qsfp.h"
+
+/*
+ * QSFP support for ib_qib driver, using "Two Wire Serial Interface" driver
+ * in qib_twsi.c
+ */
+#define QSFP_MAX_RETRY 4
+
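+/*
+ * qsfp_read
+ * Read @len bytes of QSFP module memory at @addr into @bp, selecting
+ * the module via its GPIO pins and splitting the transfer so no single
+ * TWSI block read crosses a QSFP_PAGESIZE boundary.
+ * Returns the number of bytes read, or a negative errno.
+ */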
+static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 out, mask;
+ int ret, cnt, pass = 0;
+ int stuck = 0;
+ u8 *buff = bp;
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (ret)
+ goto no_unlock;
+
+ if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
+ ret = -ENXIO;
+ goto bail;
+ }
+
+ /*
+ * We presume, if we are called at all, that this board has
+ * QSFP. This is on the same i2c chain as the legacy parts,
+ * but only responds if the module is selected via GPIO pins.
+ * Further, there are very long setup and hold requirements
+ * on MODSEL.
+ */
+ mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+ out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+ if (ppd->hw_pidx) {
+ mask <<= QSFP_GPIO_PORT2_SHIFT;
+ out <<= QSFP_GPIO_PORT2_SHIFT;
+ }
+
+ dd->f_gpio_mod(dd, out, mask, mask);
+
+ /*
+ * Module could take up to 2 msec to respond to MOD_SEL, and there
+ * is no way to tell if it is ready, so we must wait.
+ */
+ msleep(2);
+
+ /* Make sure TWSI bus is in sane state. */
+ ret = qib_twsi_reset(dd);
+ if (ret) {
+ qib_dev_porterr(dd, ppd->port,
+ "QSFP interface Reset for read failed\n");
+ ret = -EIO;
+ stuck = 1;
+ goto deselect;
+ }
+
+ /* All QSFP modules are at A0 */
+
+ cnt = 0;
+ while (cnt < len) {
+ unsigned in_page;
+ int wlen = len - cnt;
+ in_page = addr % QSFP_PAGESIZE;
+ if ((in_page + wlen) > QSFP_PAGESIZE)
+ wlen = QSFP_PAGESIZE - in_page;
+ ret = qib_twsi_blk_rd(dd, QSFP_DEV, addr, buff + cnt, wlen);
+ /* Some QSFPs fail on the first try. Retry as an experiment */
+ if (ret && cnt == 0 && ++pass < QSFP_MAX_RETRY)
+ continue;
+ if (ret) {
+ /* qib_twsi_blk_rd() returns 1 for error, else 0 */
+ ret = -EIO;
+ goto deselect;
+ }
+ addr += wlen;
+ cnt += wlen;
+ }
+ ret = cnt;
+
+deselect:
+ /*
+ * Module could take up to 10 uSec after transfer before
+ * ready to respond to MOD_SEL negation, and there is no way
+ * to tell if it is ready, so we must wait.
+ */
+ udelay(10);
+ /* set QSFP MODSEL, RST. LP all high */
+ dd->f_gpio_mod(dd, mask, mask, mask);
+
+ /*
+ * Module could take up to 2 msec to respond to MOD_SEL
+ * going away, and there is no way to tell if it is ready,
+ * so we must wait.
+ */
+ if (stuck)
+ qib_dev_err(dd, "QSFP interface bus stuck non-idle\n");
+
+ if (pass >= QSFP_MAX_RETRY && ret)
+ qib_dev_porterr(dd, ppd->port, "QSFP failed even retrying\n");
+ else if (pass)
+ qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
+
+ msleep(2);
+
+bail:
+ mutex_unlock(&dd->eep_lock);
+
+no_unlock:
+ return ret;
+}
+
+/*
+ * qib_qsfp_write
+ * We do not ordinarily write the QSFP, but this is needed to select
+ * the page on non-flat QSFPs, and possibly for other unusual cases later.
+ */
+static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
+ int len)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 out, mask;
+ int ret, cnt;
+ u8 *buff = bp;
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (ret)
+ goto no_unlock;
+
+ if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
+ ret = -ENXIO;
+ goto bail;
+ }
+
+ /*
+ * We presume, if we are called at all, that this board has
+ * QSFP. This is on the same i2c chain as the legacy parts,
+ * but only responds if the module is selected via GPIO pins.
+ * Further, there are very long setup and hold requirements
+ * on MODSEL.
+ */
+ mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+ out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+ if (ppd->hw_pidx) {
+ mask <<= QSFP_GPIO_PORT2_SHIFT;
+ out <<= QSFP_GPIO_PORT2_SHIFT;
+ }
+ dd->f_gpio_mod(dd, out, mask, mask);
+
+ /*
+ * Module could take up to 2 msec to respond to MOD_SEL,
+ * and there is no way to tell if it is ready, so we must wait.
+ */
+ msleep(2);
+
+ /* Make sure TWSI bus is in sane state. */
+ ret = qib_twsi_reset(dd);
+ if (ret) {
+ qib_dev_porterr(dd, ppd->port,
+ "QSFP interface Reset for write failed\n");
+ ret = -EIO;
+ goto deselect;
+ }
+
+ /* All QSFP modules are at A0 */
+
+ cnt = 0;
+ while (cnt < len) {
+ unsigned in_page;
+ int wlen = len - cnt;
+ in_page = addr % QSFP_PAGESIZE;
+ if ((in_page + wlen) > QSFP_PAGESIZE)
+ wlen = QSFP_PAGESIZE - in_page;
+ ret = qib_twsi_blk_wr(dd, QSFP_DEV, addr, buff + cnt, wlen);
+ if (ret) {
+ /* qib_twsi_blk_wr() returns 1 for error, else 0 */
+ ret = -EIO;
+ goto deselect;
+ }
+ addr += wlen;
+ cnt += wlen;
+ }
+ ret = cnt;
+
+deselect:
+ /*
+ * Module could take up to 10 uSec after transfer before
+ * ready to respond to MOD_SEL negation, and there is no way
+ * to tell if it is ready, so we must wait.
+ */
+ udelay(10);
+ /* set QSFP MODSEL, RST, LP high */
+ dd->f_gpio_mod(dd, mask, mask, mask);
+ /*
+ * Module could take up to 2 msec to respond to MOD_SEL
+ * going away, and there is no way to tell if it is ready,
+ * so we must wait.
+ */
+ msleep(2);
+
+bail:
+ mutex_unlock(&dd->eep_lock);
+
+no_unlock:
+ return ret;
+}
+
+/*
+ * For validation, we want to check the checksums, even of the
+ * fields we do not otherwise use. This function reads the bytes from
+ * <first> to <next-1> and returns the 8 LSBs of the sum, or <0 for errors.
+ */
+static int qsfp_cks(struct qib_pportdata *ppd, int first, int next)
+{
+ int ret;
+ u16 cks;
+ u8 bval;
+
+ cks = 0;
+ while (first < next) {
+ ret = qsfp_read(ppd, first, &bval, 1);
+ if (ret < 0)
+ goto bail;
+ cks += bval;
+ ++first;
+ }
+ ret = cks & 0xFF;
+bail:
+ return ret;
+
+}
+
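+/*
+ * qib_refresh_qsfp_cache
+ * Re-read the identifying fields of the QSFP EEPROM into @cp and
+ * verify both checksums, warning (but not failing) on a mismatch.
+ * Returns 0 on success or a negative errno, with cp->id zeroed so a
+ * failed read is never mistaken for valid cable data.
+ */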
+int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
+{
+ int ret;
+ int idx;
+ u16 cks;
+ u32 mask;
+ u8 peek[4];
+
+ /* ensure sane contents on invalid reads, for cable swaps */
+ memset(cp, 0, sizeof(*cp));
+
+ mask = QSFP_GPIO_MOD_PRS_N;
+ if (ppd->hw_pidx)
+ mask <<= QSFP_GPIO_PORT2_SHIFT;
+
+ ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
+ if (ret & mask) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ ret = qsfp_read(ppd, 0, peek, 3);
+ if (ret < 0)
+ goto bail;
+ if ((peek[0] & 0xFE) != 0x0C)
+ qib_dev_porterr(ppd->dd, ppd->port,
+ "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);
+
+ if ((peek[2] & 2) == 0) {
+ /*
+ * If cable is paged, rather than "flat memory", we need to
+ * set the page to zero, even if it already appears to be zero.
+ */
+ u8 poke = 0;
+ ret = qib_qsfp_write(ppd, 127, &poke, 1);
+ udelay(50);
+ if (ret != 1) {
+ qib_dev_porterr(ppd->dd, ppd->port,
+ "Failed QSFP Page set\n");
+ goto bail;
+ }
+ }
+
+ ret = qsfp_read(ppd, QSFP_MOD_ID_OFFS, &cp->id, 1);
+ if (ret < 0)
+ goto bail;
+ if ((cp->id & 0xFE) != 0x0C)
+ qib_dev_porterr(ppd->dd, ppd->port,
+ "QSFP ID byte is 0x%02X, S/B 0x0C/D\n", cp->id);
+ cks = cp->id;
+
+ ret = qsfp_read(ppd, QSFP_MOD_PWR_OFFS, &cp->pwr, 1);
+ if (ret < 0)
+ goto bail;
+ cks += cp->pwr;
+
+ ret = qsfp_cks(ppd, QSFP_MOD_PWR_OFFS + 1, QSFP_MOD_LEN_OFFS);
+ if (ret < 0)
+ goto bail;
+ cks += ret;
+
+ ret = qsfp_read(ppd, QSFP_MOD_LEN_OFFS, &cp->len, 1);
+ if (ret < 0)
+ goto bail;
+ cks += cp->len;
+
+ ret = qsfp_read(ppd, QSFP_MOD_TECH_OFFS, &cp->tech, 1);
+ if (ret < 0)
+ goto bail;
+ cks += cp->tech;
+
+ ret = qsfp_read(ppd, QSFP_VEND_OFFS, &cp->vendor, QSFP_VEND_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_VEND_LEN; ++idx)
+ cks += cp->vendor[idx];
+
+ ret = qsfp_read(ppd, QSFP_IBXCV_OFFS, &cp->xt_xcv, 1);
+ if (ret < 0)
+ goto bail;
+ cks += cp->xt_xcv;
+
+ ret = qsfp_read(ppd, QSFP_VOUI_OFFS, &cp->oui, QSFP_VOUI_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_VOUI_LEN; ++idx)
+ cks += cp->oui[idx];
+
+ ret = qsfp_read(ppd, QSFP_PN_OFFS, &cp->partnum, QSFP_PN_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_PN_LEN; ++idx)
+ cks += cp->partnum[idx];
+
+ ret = qsfp_read(ppd, QSFP_REV_OFFS, &cp->rev, QSFP_REV_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_REV_LEN; ++idx)
+ cks += cp->rev[idx];
+
+ ret = qsfp_read(ppd, QSFP_ATTEN_OFFS, &cp->atten, QSFP_ATTEN_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_ATTEN_LEN; ++idx)
+ cks += cp->atten[idx];
+
+ ret = qsfp_cks(ppd, QSFP_ATTEN_OFFS + QSFP_ATTEN_LEN, QSFP_CC_OFFS);
+ if (ret < 0)
+ goto bail;
+ cks += ret;
+
+ cks &= 0xFF;
+ ret = qsfp_read(ppd, QSFP_CC_OFFS, &cp->cks1, 1);
+ if (ret < 0)
+ goto bail;
+ if (cks != cp->cks1)
+ qib_dev_porterr(ppd->dd, ppd->port,
+ "QSFP cks1 is %02X, computed %02X\n", cp->cks1,
+ cks);
+
+ /* Second checksum covers bytes 192..222 (serial, date, lot) */
+ ret = qsfp_cks(ppd, QSFP_CC_OFFS + 1, QSFP_SN_OFFS);
+ if (ret < 0)
+ goto bail;
+ cks = ret;
+
+ ret = qsfp_read(ppd, QSFP_SN_OFFS, &cp->serial, QSFP_SN_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_SN_LEN; ++idx)
+ cks += cp->serial[idx];
+
+ ret = qsfp_read(ppd, QSFP_DATE_OFFS, &cp->date, QSFP_DATE_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_DATE_LEN; ++idx)
+ cks += cp->date[idx];
+
+ ret = qsfp_read(ppd, QSFP_LOT_OFFS, &cp->lot, QSFP_LOT_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_LOT_LEN; ++idx)
+ cks += cp->lot[idx];
+
+ ret = qsfp_cks(ppd, QSFP_LOT_OFFS + QSFP_LOT_LEN, QSFP_CC_EXT_OFFS);
+ if (ret < 0)
+ goto bail;
+ cks += ret;
+
+ ret = qsfp_read(ppd, QSFP_CC_EXT_OFFS, &cp->cks2, 1);
+ if (ret < 0)
+ goto bail;
+ cks &= 0xFF;
+ if (cks != cp->cks2)
+ qib_dev_porterr(ppd->dd, ppd->port,
+ "QSFP cks2 is %02X, computed %02X\n", cp->cks2,
+ cks);
+ return 0;
+
+bail:
+ cp->id = 0;
+ return ret;
+}
+
+const char * const qib_qsfp_devtech[16] = {
+ "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
+ "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
+ "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
+ "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
+};
+
+#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
+#define QSFP_DEFAULT_HDR_CNT 224
+
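+/*
+ * pwr_codes is indexed in 4-byte steps by the 2-bit power class from
+ * QSFP_PWR(): class 1, for example, selects pwr_codes + 4 and the
+ * "%.3s" format in qib_qsfp_dump() then prints "PWR:2.0W".
+ */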
+static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
+
+/*
+ * Initialize structures that control access to QSFP. Called once per port
+ * on cards that support QSFP.
+ */
+void qib_qsfp_init(struct qib_qsfp_data *qd,
+ void (*fevent)(struct work_struct *))
+{
+ u32 mask, highs;
+ int pins;
+
+ struct qib_devdata *dd = qd->ppd->dd;
+
+ /* Initialize work struct for later QSFP events */
+ INIT_WORK(&qd->work, fevent);
+
+ /*
+ * Later, we may want more validation. For now, just set up pins and
+ * blip reset. If module is present, call qib_refresh_qsfp_cache(),
+ * to do further init.
+ */
+ mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+ highs = mask - QSFP_GPIO_MOD_RST_N;
+ if (qd->ppd->hw_pidx) {
+ mask <<= QSFP_GPIO_PORT2_SHIFT;
+ highs <<= QSFP_GPIO_PORT2_SHIFT;
+ }
+ dd->f_gpio_mod(dd, highs, mask, mask);
+ udelay(20); /* Generous RST dwell */
+
+ dd->f_gpio_mod(dd, mask, mask, mask);
+ /* Spec says module can take up to two seconds! */
+ mask = QSFP_GPIO_MOD_PRS_N;
+ if (qd->ppd->hw_pidx)
+ mask <<= QSFP_GPIO_PORT2_SHIFT;
+
+ /* Do not try to wait here. Better to let event handle it */
+ pins = dd->f_gpio_mod(dd, 0, 0, 0);
+ if (pins & mask)
+ goto bail;
+ /* We see a module, but it may be unwise to look yet. Just schedule */
+ qd->t_insert = get_jiffies_64();
+ schedule_work(&qd->work);
+bail:
+ return;
+}
+
+void qib_qsfp_deinit(struct qib_qsfp_data *qd)
+{
+ /*
+ * There is nothing to do here for now. Our
+ * work is scheduled with schedule_work(), and
+ * flush_scheduled_work() from remove_one will
+ * block until all work set up with schedule_work()
+ * completes.
+ */
+}
+
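+/*
+ * qib_qsfp_dump
+ * Refresh the QSFP cache for @ppd, decode the interesting fields into
+ * @buf, then append a hex dump of the first QSFP_DEFAULT_HDR_CNT bytes
+ * in QSFP_DUMP_CHUNK-sized rows.
+ * Returns the number of characters written, or a negative errno.
+ */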
+int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
+{
+ struct qib_qsfp_cache cd;
+ u8 bin_buff[QSFP_DUMP_CHUNK];
+ char lenstr[6];
+ int sofar, ret;
+ int bidx = 0;
+
+ sofar = 0;
+ ret = qib_refresh_qsfp_cache(ppd, &cd);
+ if (ret < 0)
+ goto bail;
+
+ lenstr[0] = ' ';
+ lenstr[1] = '\0';
+ if (QSFP_IS_CU(cd.tech))
+ sprintf(lenstr, "%dM ", cd.len);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", pwr_codes +
+ (QSFP_PWR(cd.pwr) * 4));
+
+ sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n", lenstr,
+ qib_qsfp_devtech[cd.tech >> 4]);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
+ QSFP_VEND_LEN, cd.vendor);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
+ QSFP_OUI(cd.oui));
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
+ QSFP_PN_LEN, cd.partnum);
+ sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
+ QSFP_REV_LEN, cd.rev);
+ if (QSFP_IS_CU(cd.tech))
+ sofar += scnprintf(buf + sofar, len - sofar, "Atten:%d, %d\n",
+ QSFP_ATTEN_SDR(cd.atten),
+ QSFP_ATTEN_DDR(cd.atten));
+ sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
+ QSFP_SN_LEN, cd.serial);
+ sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
+ QSFP_DATE_LEN, cd.date);
+ sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
+ QSFP_LOT_LEN, cd.lot);
+
+ while (bidx < QSFP_DEFAULT_HDR_CNT) {
+ int iidx;
+ ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
+ if (ret < 0)
+ goto bail;
+ for (iidx = 0; iidx < ret; ++iidx) {
+ sofar += scnprintf(buf + sofar, len-sofar, " %02X",
+ bin_buff[iidx]);
+ }
+ sofar += scnprintf(buf + sofar, len - sofar, "\n");
+ bidx += QSFP_DUMP_CHUNK;
+ }
+ ret = sofar;
+bail:
+ return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
new file mode 100644
index 000000000000..19b527bafd57
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qsfp.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/* QSFP support common definitions, for ib_qib driver */
+
+#define QSFP_DEV 0xA0
+#define QSFP_PWR_LAG_MSEC 2000
+
+/*
+ * Below are masks for various QSFP signals, for Port 1.
+ * Port2 equivalents are shifted by QSFP_GPIO_PORT2_SHIFT.
+ * _N means asserted low
+ */
+#define QSFP_GPIO_MOD_SEL_N (4)
+#define QSFP_GPIO_MOD_PRS_N (8)
+#define QSFP_GPIO_INT_N (0x10)
+#define QSFP_GPIO_MOD_RST_N (0x20)
+#define QSFP_GPIO_LP_MODE (0x40)
+#define QSFP_GPIO_PORT2_SHIFT 5
+
+#define QSFP_PAGESIZE 128
+/* Defined fields that QLogic requires of qualified cables */
+/* Byte 0 is Identifier, not checked */
+/* Byte 1 is reserved "status MSB" */
+/* Byte 2 is "status LSB" We only care that D2 "Flat Mem" is set. */
+/*
+ * Rest of first 128 not used, although 127 is reserved for page select
+ * if module is not "Flat memory".
+ */
+/* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */
+#define QSFP_MOD_ID_OFFS 128
+/*
+ * Byte 129 is "Extended Identifier". We only care about D7,D6: Power class
+ * 0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W
+ */
+#define QSFP_MOD_PWR_OFFS 129
+/* Byte 130 is Connector type. Not QLogic req'd */
+/* Bytes 131..138 are Transceiver types, bit maps for various tech, none IB */
+/* Byte 139 is encoding. code 0x01 is 8b10b. Not QLogic req'd */
+/* Byte 140 is nominal bit-rate, in units of 100Mbits/sec. Not QLogic req'd */
+/* Byte 141 is Extended Rate Select. Not QLogic req'd */
+/* Bytes 142..145 are lengths for various fiber types. Not QLogic req'd */
+/* Byte 146 is length for Copper. Units of 1 meter */
+#define QSFP_MOD_LEN_OFFS 146
+/*
+ * Byte 147 is Device technology. D0..3 not QLogic req'd
+ * D4..7 select from 15 choices, translated by table:
+ */
+#define QSFP_MOD_TECH_OFFS 147
+extern const char *const qib_qsfp_devtech[16];
+/* Active Equalization includes fiber, copper full EQ, and copper near Eq */
+#define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1)
+/* Attenuation should be valid for copper other than full/near Eq */
+#define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1)
+/* Length is only valid if technology is "copper" */
+#define QSFP_IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)
+#define QSFP_TECH_1490 9
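+/*
+ * Worked example of the bitmask macros above: a tech byte of 0xA0 has
+ * D4..7 == 10 ("Cu NoEq" in qib_qsfp_devtech[]), so QSFP_IS_CU() yields
+ * (0xED00 >> 10) & 1 == 1 and QSFP_HAS_ATTEN() yields
+ * (0x4D00 >> 10) & 1 == 1, while QSFP_IS_ACTIVE() yields 0.
+ */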
+
+#define QSFP_OUI(oui) (((unsigned)oui[0] << 16) | ((unsigned)oui[1] << 8) | \
+ oui[2])
+#define QSFP_OUI_AMPHENOL 0x415048
+#define QSFP_OUI_FINISAR 0x009065
+#define QSFP_OUI_GORE 0x002177
+
+/* Bytes 148..163 are Vendor Name, Left-justified Blank-filled */
+#define QSFP_VEND_OFFS 148
+#define QSFP_VEND_LEN 16
+/* Byte 164 is IB Extended transceiver codes. Bits D0..3 are SDR,DDR,QDR,EDR */
+#define QSFP_IBXCV_OFFS 164
+/* Bytes 165..167 are Vendor OUI number */
+#define QSFP_VOUI_OFFS 165
+#define QSFP_VOUI_LEN 3
+/* Bytes 168..183 are Vendor Part Number, string */
+#define QSFP_PN_OFFS 168
+#define QSFP_PN_LEN 16
+/* Bytes 184,185 are Vendor Rev. Left Justified, Blank-filled */
+#define QSFP_REV_OFFS 184
+#define QSFP_REV_LEN 2
+/*
+ * Bytes 186,187 are Wavelength, if Optical. Not Qlogic req'd
+ * If copper, they are attenuation in dB:
+ * Byte 186 is at 2.5Gb/sec (SDR), Byte 187 at 5.0Gb/sec (DDR)
+ */
+#define QSFP_ATTEN_OFFS 186
+#define QSFP_ATTEN_LEN 2
+/* Bytes 188,189 are Wavelength tolerance, not QLogic req'd */
+/* Byte 190 is Max Case Temp. Not QLogic req'd */
+/* Byte 191 is LSB of sum of bytes 128..190. Not QLogic req'd */
+#define QSFP_CC_OFFS 191
+/* Bytes 192..195 are Options implemented in qsfp. Not Qlogic req'd */
+/* Bytes 196..211 are Serial Number, String */
+#define QSFP_SN_OFFS 196
+#define QSFP_SN_LEN 16
+/* Bytes 212..217 are date-code YYMMDD (MM==1 for Jan) */
+#define QSFP_DATE_OFFS 212
+#define QSFP_DATE_LEN 6
+/* Bytes 218,219 are optional lot-code, string */
+#define QSFP_LOT_OFFS 218
+#define QSFP_LOT_LEN 2
+/* Bytes 220, 221 indicate monitoring options, Not QLogic req'd */
+/* Byte 223 is LSB of sum of bytes 192..222 */
+#define QSFP_CC_EXT_OFFS 223
+
+/*
+ * struct qib_qsfp_data encapsulates state of QSFP device for one port.
+ * It will be part of port-chip-specific data if a board supports QSFP.
+ *
+ * Since multiple board-types use QSFP, and their pport_data structs
+ * differ (in the chip-specific section), we need a pointer to its head.
+ *
+ * Avoiding premature optimization, we will have one work_struct per port,
+ * and let the (increasingly inaccurately named) eep_lock arbitrate
+ * access to common resources.
+ *
+ */
+
+/*
+ * Hold the parts of the onboard EEPROM that we care about, so we aren't
+ * constantly bit-boffing.
+ */
+struct qib_qsfp_cache {
+ u8 id; /* must be 0x0C or 0x0D; 0 indicates invalid EEPROM read */
+ u8 pwr; /* in D6,7 */
+ u8 len; /* in meters, Cu only */
+ u8 tech;
+ char vendor[QSFP_VEND_LEN];
+ u8 xt_xcv; /* Ext. transceiver codes, 4 lsbs are IB speed supported */
+ u8 oui[QSFP_VOUI_LEN];
+ u8 partnum[QSFP_PN_LEN];
+ u8 rev[QSFP_REV_LEN];
+ u8 atten[QSFP_ATTEN_LEN];
+ u8 cks1; /* Checksum of bytes 128..190 */
+ u8 serial[QSFP_SN_LEN];
+ u8 date[QSFP_DATE_LEN];
+ u8 lot[QSFP_LOT_LEN];
+ u8 cks2; /* Checksum of bytes 192..222 */
+};
+
+#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
+#define QSFP_ATTEN_SDR(attenarray) (attenarray[0])
+#define QSFP_ATTEN_DDR(attenarray) (attenarray[1])
+
+struct qib_qsfp_data {
+ /* Helps to find our way */
+ struct qib_pportdata *ppd;
+ struct work_struct work;
+ struct qib_qsfp_cache cache;
+ u64 t_insert;
+};
+
+extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd,
+ struct qib_qsfp_cache *cp);
+extern void qib_qsfp_init(struct qib_qsfp_data *qd,
+ void (*fevent)(struct work_struct *));
+extern void qib_qsfp_deinit(struct qib_qsfp_data *qd);
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
new file mode 100644
index 000000000000..40c0a373719c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -0,0 +1,2288 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/io.h>
+
+#include "qib.h"
+
+/* cut down ridiculously long IB macro names */
+#define OP(x) IB_OPCODE_RC_##x
+
+static void rc_timeout(unsigned long arg);
+
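+/*
+ * restart_sge
+ * Reposition the SGE state @ss to the offset within @wqe that
+ * corresponds to resending from @psn: each PSN past wqe->psn accounts
+ * for one @pmtu worth of payload already sent.
+ * Returns the number of bytes of the request that remain to be sent.
+ */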
+static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
+ u32 psn, u32 pmtu)
+{
+ u32 len;
+
+ len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
+ ss->sge = wqe->sg_list[0];
+ ss->sg_list = wqe->sg_list + 1;
+ ss->num_sge = wqe->wr.num_sge;
+ ss->total_len = wqe->length;
+ qib_skip_sge(ss, len, 0);
+ return wqe->length - len;
+}
+
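+/*
+ * start_timer
+ * Arm the retry timer for 4.096 usec * 2^qp->timeout from now; with
+ * qp->timeout == 14, for example, that is roughly 67 msec.
+ */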
+static void start_timer(struct qib_qp *qp)
+{
+ qp->s_flags |= QIB_S_TIMER;
+ qp->s_timer.function = rc_timeout;
+ /* 4.096 usec. * (1 << qp->timeout) */
+ qp->s_timer.expires = jiffies +
+ usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / 1000UL);
+ add_timer(&qp->s_timer);
+}
+
+/**
+ * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
+ * @dev: the device for this QP
+ * @qp: a pointer to the QP
+ * @ohdr: a pointer to the IB header being constructed
+ * @pmtu: the path MTU
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ * Note that we are in the responder's side of the QP context.
+ * Note the QP s_lock must be held.
+ */
+static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
+ struct qib_other_headers *ohdr, u32 pmtu)
+{
+ struct qib_ack_entry *e;
+ u32 hwords;
+ u32 len;
+ u32 bth0;
+ u32 bth2;
+
+ /* Don't send an ACK if we aren't supposed to. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto bail;
+
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+
+ switch (qp->s_ack_state) {
+ case OP(RDMA_READ_RESPONSE_LAST):
+ case OP(RDMA_READ_RESPONSE_ONLY):
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ if (e->rdma_sge.mr) {
+ atomic_dec(&e->rdma_sge.mr->refcount);
+ e->rdma_sge.mr = NULL;
+ }
+ /* FALLTHROUGH */
+ case OP(ATOMIC_ACKNOWLEDGE):
+ /*
+ * We can increment the tail pointer now that the last
+ * response has been sent instead of only being
+ * constructed.
+ */
+ if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
+ qp->s_tail_ack_queue = 0;
+ /* FALLTHROUGH */
+ case OP(SEND_ONLY):
+ case OP(ACKNOWLEDGE):
+ /* Check for no next entry in the queue. */
+ if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
+ if (qp->s_flags & QIB_S_ACK_PENDING)
+ goto normal;
+ goto bail;
+ }
+
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ if (e->opcode == OP(RDMA_READ_REQUEST)) {
+ /*
+ * If a RDMA read response is being resent and
+ * we haven't seen the duplicate request yet,
+ * then stop sending the remaining responses the
+ * responder has seen until the requester resends it.
+ */
+ len = e->rdma_sge.sge_length;
+ if (len && !e->rdma_sge.mr) {
+ qp->s_tail_ack_queue = qp->r_head_ack_queue;
+ goto bail;
+ }
+ /* Copy SGE state in case we need to resend */
+ qp->s_rdma_mr = e->rdma_sge.mr;
+ if (qp->s_rdma_mr)
+ atomic_inc(&qp->s_rdma_mr->refcount);
+ qp->s_ack_rdma_sge.sge = e->rdma_sge;
+ qp->s_ack_rdma_sge.num_sge = 1;
+ qp->s_cur_sge = &qp->s_ack_rdma_sge;
+ if (len > pmtu) {
+ len = pmtu;
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
+ } else {
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
+ e->sent = 1;
+ }
+ ohdr->u.aeth = qib_compute_aeth(qp);
+ hwords++;
+ qp->s_ack_rdma_psn = e->psn;
+ bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
+ } else {
+ /* COMPARE_SWAP or FETCH_ADD */
+ qp->s_cur_sge = NULL;
+ len = 0;
+ qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
+ ohdr->u.at.aeth = qib_compute_aeth(qp);
+ ohdr->u.at.atomic_ack_eth[0] =
+ cpu_to_be32(e->atomic_data >> 32);
+ ohdr->u.at.atomic_ack_eth[1] =
+ cpu_to_be32(e->atomic_data);
+ hwords += sizeof(ohdr->u.at) / sizeof(u32);
+ bth2 = e->psn & QIB_PSN_MASK;
+ e->sent = 1;
+ }
+ bth0 = qp->s_ack_state << 24;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ qp->s_cur_sge = &qp->s_ack_rdma_sge;
+ qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
+ if (qp->s_rdma_mr)
+ atomic_inc(&qp->s_rdma_mr->refcount);
+ len = qp->s_ack_rdma_sge.sge.sge_length;
+ if (len > pmtu)
+ len = pmtu;
+ else {
+ ohdr->u.aeth = qib_compute_aeth(qp);
+ hwords++;
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ e->sent = 1;
+ }
+ bth0 = qp->s_ack_state << 24;
+ bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
+ break;
+
+ default:
+normal:
+ /*
+ * Send a regular ACK.
+ * Set the s_ack_state so we wait until after sending
+ * the ACK before setting s_ack_state to ACKNOWLEDGE
+ * (see above).
+ */
+ qp->s_ack_state = OP(SEND_ONLY);
+ qp->s_flags &= ~QIB_S_ACK_PENDING;
+ qp->s_cur_sge = NULL;
+ if (qp->s_nak_state)
+ ohdr->u.aeth =
+ cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
+ (qp->s_nak_state <<
+ QIB_AETH_CREDIT_SHIFT));
+ else
+ ohdr->u.aeth = qib_compute_aeth(qp);
+ hwords++;
+ len = 0;
+ bth0 = OP(ACKNOWLEDGE) << 24;
+ bth2 = qp->s_ack_psn & QIB_PSN_MASK;
+ }
+ qp->s_rdma_ack_cnt++;
+ qp->s_hdrwords = hwords;
+ qp->s_cur_size = len;
+ qib_make_ruc_header(qp, ohdr, bth0, bth2);
+ return 1;
+
+bail:
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+ qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING);
+ return 0;
+}
+
+/**
+ * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
+ * @qp: a pointer to the QP
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ */
+int qib_make_rc_req(struct qib_qp *qp)
+{
+ struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+ struct qib_other_headers *ohdr;
+ struct qib_sge_state *ss;
+ struct qib_swqe *wqe;
+ u32 hwords;
+ u32 len;
+ u32 bth0;
+ u32 bth2;
+ u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ char newreq;
+ unsigned long flags;
+ int ret = 0;
+ int delta;
+
+ ohdr = &qp->s_hdr.u.oth;
+ if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ ohdr = &qp->s_hdr.u.l.oth;
+
+ /*
+ * The lock is needed to synchronize between the sending tasklet,
+ * the receive interrupt handler, and timeout resends.
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Sending responses has higher priority over sending requests. */
+ if ((qp->s_flags & QIB_S_RESP_PENDING) &&
+ qib_make_rc_ack(dev, qp, ohdr, pmtu))
+ goto done;
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
+ if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+ goto bail;
+ /* We are in the error state, flush the work request. */
+ if (qp->s_last == qp->s_head)
+ goto bail;
+ /* If DMAs are in progress, we can't flush immediately. */
+ if (atomic_read(&qp->s_dma_busy)) {
+ qp->s_flags |= QIB_S_WAIT_DMA;
+ goto bail;
+ }
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ while (qp->s_last != qp->s_acked) {
+ qib_send_complete(qp, wqe, IB_WC_SUCCESS);
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ }
+ qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ goto done;
+ }
+
+ if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK))
+ goto bail;
+
+ if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
+ if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
+ qp->s_flags |= QIB_S_WAIT_PSN;
+ goto bail;
+ }
+ qp->s_sending_psn = qp->s_psn;
+ qp->s_sending_hpsn = qp->s_psn - 1;
+ }
+
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+ bth0 = 0;
+
+ /* Send a request. */
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ switch (qp->s_state) {
+ default:
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK))
+ goto bail;
+ /*
+ * Resend an old request or start a new one.
+ *
+ * We keep track of the current SWQE so that
+ * we don't reset the "furthest progress" state
+ * if we need to back up.
+ */
+ newreq = 0;
+ if (qp->s_cur == qp->s_tail) {
+ /* Check if send work queue is empty. */
+ if (qp->s_tail == qp->s_head)
+ goto bail;
+ /*
+ * If a fence is requested, wait for previous
+ * RDMA read and atomic operations to finish.
+ */
+ if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
+ qp->s_num_rd_atomic) {
+ qp->s_flags |= QIB_S_WAIT_FENCE;
+ goto bail;
+ }
+ wqe->psn = qp->s_next_psn;
+ newreq = 1;
+ }
+ /*
+ * Note that we have to be careful not to modify the
+ * original work request since we may need to resend
+ * it.
+ */
+ len = wqe->length;
+ ss = &qp->s_sge;
+ bth2 = qp->s_psn & QIB_PSN_MASK;
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ /* If no credit, return. */
+ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
+ qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
+ qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
+ goto bail;
+ }
+ wqe->lpsn = wqe->psn;
+ if (len > pmtu) {
+ wqe->lpsn += (len - 1) / pmtu;
+ qp->s_state = OP(SEND_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_ONLY);
+ else {
+ qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ /* FALLTHROUGH */
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ /* If no credit, return. */
+ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
+ qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
+ qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
+ goto bail;
+ }
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(len);
+ hwords += sizeof(struct ib_reth) / sizeof(u32);
+ wqe->lpsn = wqe->psn;
+ if (len > pmtu) {
+ wqe->lpsn += (len - 1) / pmtu;
+ qp->s_state = OP(RDMA_WRITE_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_ONLY);
+ else {
+ qp->s_state =
+ OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after RETH */
+ ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_RDMA_READ:
+ /*
+ * Don't allow more operations to be started
+ * than the QP limits allow.
+ */
+ if (newreq) {
+ if (qp->s_num_rd_atomic >=
+ qp->s_max_rd_atomic) {
+ qp->s_flags |= QIB_S_WAIT_RDMAR;
+ goto bail;
+ }
+ qp->s_num_rd_atomic++;
+ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ /*
+ * Adjust s_next_psn to count the
+ * expected number of responses.
+ */
+ if (len > pmtu)
+ qp->s_next_psn += (len - 1) / pmtu;
+ wqe->lpsn = qp->s_next_psn++;
+ }
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(len);
+ qp->s_state = OP(RDMA_READ_REQUEST);
+ hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
+ ss = NULL;
+ len = 0;
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ /*
+ * Don't allow more operations to be started
+ * than the QP limits allow.
+ */
+ if (newreq) {
+ if (qp->s_num_rd_atomic >=
+ qp->s_max_rd_atomic) {
+ qp->s_flags |= QIB_S_WAIT_RDMAR;
+ goto bail;
+ }
+ qp->s_num_rd_atomic++;
+ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ wqe->lpsn = wqe->psn;
+ }
+ if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+ qp->s_state = OP(COMPARE_SWAP);
+ ohdr->u.atomic_eth.swap_data = cpu_to_be64(
+ wqe->wr.wr.atomic.swap);
+ ohdr->u.atomic_eth.compare_data = cpu_to_be64(
+ wqe->wr.wr.atomic.compare_add);
+ } else {
+ qp->s_state = OP(FETCH_ADD);
+ ohdr->u.atomic_eth.swap_data = cpu_to_be64(
+ wqe->wr.wr.atomic.compare_add);
+ ohdr->u.atomic_eth.compare_data = 0;
+ }
+ ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
+ wqe->wr.wr.atomic.remote_addr >> 32);
+ ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
+ wqe->wr.wr.atomic.remote_addr);
+ ohdr->u.atomic_eth.rkey = cpu_to_be32(
+ wqe->wr.wr.atomic.rkey);
+ hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
+ ss = NULL;
+ len = 0;
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ default:
+ goto bail;
+ }
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+ qp->s_len = wqe->length;
+ if (newreq) {
+ qp->s_tail++;
+ if (qp->s_tail >= qp->s_size)
+ qp->s_tail = 0;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ qp->s_psn = wqe->lpsn + 1;
+ else {
+ qp->s_psn++;
+ if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ }
+ break;
+
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
+ * thread to indicate a SEND needs to be restarted from an
+ * earlier PSN without interfering with the sending thread.
+ * See qib_restart_rc().
+ */
+ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
+ /* FALLTHROUGH */
+ case OP(SEND_FIRST):
+ qp->s_state = OP(SEND_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ bth2 = qp->s_psn++ & QIB_PSN_MASK;
+ if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ ss = &qp->s_sge;
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_LAST);
+ else {
+ qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ bth2 |= IB_BTH_REQ_ACK;
+ qp->s_cur++;
+ if (qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_LAST):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_LAST is used by the ACK processing
+ * thread to indicate a RDMA write needs to be restarted from
+ * an earlier PSN without interfering with the sending thread.
+ * See qib_restart_rc().
+ */
+ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_FIRST):
+ qp->s_state = OP(RDMA_WRITE_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_MIDDLE):
+ bth2 = qp->s_psn++ & QIB_PSN_MASK;
+ if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ ss = &qp->s_sge;
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_LAST);
+ else {
+ qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ bth2 |= IB_BTH_REQ_ACK;
+ qp->s_cur++;
+ if (qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
+ * thread to indicate a RDMA read needs to be restarted from
+ * an earlier PSN without interfering with the sending thread.
+ * See qib_restart_rc().
+ */
+ len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
+ qp->s_state = OP(RDMA_READ_REQUEST);
+ hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
+ bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
+ qp->s_psn = wqe->lpsn + 1;
+ ss = NULL;
+ len = 0;
+ qp->s_cur++;
+ if (qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+ }
+ qp->s_sending_hpsn = bth2;
+ delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
+ if (delta && delta % QIB_PSN_CREDIT == 0)
+ bth2 |= IB_BTH_REQ_ACK;
+ if (qp->s_flags & QIB_S_SEND_ONE) {
+ qp->s_flags &= ~QIB_S_SEND_ONE;
+ qp->s_flags |= QIB_S_WAIT_ACK;
+ bth2 |= IB_BTH_REQ_ACK;
+ }
+ qp->s_len -= len;
+ qp->s_hdrwords = hwords;
+ qp->s_cur_sge = ss;
+ qp->s_cur_size = len;
+ qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
+done:
+ ret = 1;
+ goto unlock;
+
+bail:
+ qp->s_flags &= ~QIB_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
+
+/**
+ * qib_send_rc_ack - Construct an ACK packet and send it
+ * @qp: a pointer to the QP
+ *
+ * This is called from qib_rc_rcv() and qib_kreceive().
+ * Note that RDMA reads and atomics are handled in the
+ * send side QP state and tasklet.
+ */
+void qib_send_rc_ack(struct qib_qp *qp)
+{
+ struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ u64 pbc;
+ u16 lrh0;
+ u32 bth0;
+ u32 hwords;
+ u32 pbufn;
+ u32 __iomem *piobuf;
+ struct qib_ib_header hdr;
+ struct qib_other_headers *ohdr;
+ u32 control;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto unlock;
+
+ /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
+ if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
+ goto queue_ack;
+
+ /* Construct the header with s_lock held so APM doesn't change it. */
+ ohdr = &hdr.u.oth;
+ lrh0 = QIB_LRH_BTH;
+ /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
+ hwords = 6;
+ if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+ hwords += qib_make_grh(ibp, &hdr.u.l.grh,
+ &qp->remote_ah_attr.grh, hwords, 0);
+ ohdr = &hdr.u.l.oth;
+ lrh0 = QIB_LRH_GRH;
+ }
+ /* read pkey_index w/o lock (it's atomic) */
+ bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
+ if (qp->s_mig_state == IB_MIG_MIGRATED)
+ bth0 |= IB_BTH_MIG_REQ;
+ if (qp->r_nak_state)
+ ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
+ (qp->r_nak_state <<
+ QIB_AETH_CREDIT_SHIFT));
+ else
+ ohdr->u.aeth = qib_compute_aeth(qp);
+ lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
+ qp->remote_ah_attr.sl << 4;
+ hdr.lrh[0] = cpu_to_be16(lrh0);
+ hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
+ hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
+ ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ /* Don't try to send ACKs if the link isn't ACTIVE */
+ if (!(ppd->lflags & QIBL_LINKACTIVE))
+ goto done;
+
+ control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
+ qp->s_srate, lrh0 >> 12);
+ /* length is + 1 for the control dword */
+ pbc = ((u64) control << 32) | (hwords + 1);
+
+ piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
+ if (!piobuf) {
+ /*
+ * We are out of PIO buffers at the moment.
+ * Pass responsibility for sending the ACK to the
+ * send tasklet so that when a PIO buffer becomes
+ * available, the ACK is sent ahead of other outgoing
+ * packets.
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+ goto queue_ack;
+ }
+
+ /*
+ * Write the pbc.
+ * We have to flush after the PBC for correctness
+ * on some CPUs or the WC buffer can be written out of order.
+ */
+ writeq(pbc, piobuf);
+
+ if (dd->flags & QIB_PIO_FLUSH_WC) {
+ u32 *hdrp = (u32 *) &hdr;
+
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
+ qib_flush_wc();
+ __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
+ } else
+ qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);
+
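+ /*
+ * Chips flagged QIB_USE_SPCL_TRIG need a write to the special
+ * trigger offset to kick off the send once the buffer is filled.
+ */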
+ if (dd->flags & QIB_USE_SPCL_TRIG) {
+ u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+
+ qib_flush_wc();
+ __raw_writel(0xaebecede, piobuf + spcl_off);
+ }
+
+ qib_flush_wc();
+ qib_sendbuf_done(dd, pbufn);
+
+ ibp->n_unicast_xmit++;
+ goto done;
+
+queue_ack:
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+ ibp->n_rc_qacks++;
+ qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
+ qp->s_nak_state = qp->r_nak_state;
+ qp->s_ack_psn = qp->r_ack_psn;
+
+ /* Schedule the send tasklet. */
+ qib_schedule_send(qp);
+ }
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+done:
+ return;
+}
+
+/**
+ * reset_psn - reset the QP state to send starting from PSN
+ * @qp: the QP
+ * @psn: the packet sequence number to restart at
+ *
+ * This is called from qib_rc_rcv() to process an incoming RC ACK
+ * for the given QP.
+ * Called at interrupt level with the QP s_lock held.
+ */
+static void reset_psn(struct qib_qp *qp, u32 psn)
+{
+ u32 n = qp->s_acked;
+ struct qib_swqe *wqe = get_swqe_ptr(qp, n);
+ u32 opcode;
+
+ qp->s_cur = n;
+
+ /*
+ * If we are starting the request from the beginning,
+ * let the normal send code handle initialization.
+ */
+ if (qib_cmp24(psn, wqe->psn) <= 0) {
+ qp->s_state = OP(SEND_LAST);
+ goto done;
+ }
+
+ /* Find the work request opcode corresponding to the given PSN. */
+ opcode = wqe->wr.opcode;
+ for (;;) {
+ int diff;
+
+ if (++n == qp->s_size)
+ n = 0;
+ if (n == qp->s_tail)
+ break;
+ wqe = get_swqe_ptr(qp, n);
+ diff = qib_cmp24(psn, wqe->psn);
+ if (diff < 0)
+ break;
+ qp->s_cur = n;
+ /*
+ * If we are starting the request from the beginning,
+ * let the normal send code handle initialization.
+ */
+ if (diff == 0) {
+ qp->s_state = OP(SEND_LAST);
+ goto done;
+ }
+ opcode = wqe->wr.opcode;
+ }
+
+ /*
+ * Set the state to restart in the middle of a request.
+ * Don't change the s_sge, s_cur_sge, or s_cur_size.
+ * See qib_make_rc_req().
+ */
+ switch (opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
+ break;
+
+ case IB_WR_RDMA_READ:
+ qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+ break;
+
+ default:
+ /*
+ * This case shouldn't happen since it's only
+ * one PSN per request.
+ */
+ qp->s_state = OP(SEND_LAST);
+ }
+done:
+ qp->s_psn = psn;
+ /*
+ * Set QIB_S_WAIT_PSN as qib_rc_complete() may start the timer
+ * asynchronously before the send tasklet can get scheduled.
+ * Doing it in qib_make_rc_req() is too late.
+ */
+ if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
+ (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
+ qp->s_flags |= QIB_S_WAIT_PSN;
+}
+
+/*
+ * Back up requester to resend the last un-ACKed request.
+ * The QP s_lock should be held and interrupts disabled.
+ */
+static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
+{
+ struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
+ struct qib_ibport *ibp;
+
+ if (qp->s_retry == 0) {
+ if (qp->s_mig_state == IB_MIG_ARMED) {
+ qib_migrate_qp(qp);
+ qp->s_retry = qp->s_retry_cnt;
+ } else if (qp->s_last == qp->s_acked) {
+ qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+ qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ return;
+ } else /* XXX need to handle delayed completion */
+ return;
+ } else
+ qp->s_retry--;
+
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ ibp->n_rc_resends++;
+ else
+ ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+
+ qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
+ QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN |
+ QIB_S_WAIT_ACK);
+ if (wait)
+ qp->s_flags |= QIB_S_SEND_ONE;
+ reset_psn(qp, psn);
+}
+
+/*
+ * This is called from s_timer for missing responses.
+ */
+static void rc_timeout(unsigned long arg)
+{
+ struct qib_qp *qp = (struct qib_qp *)arg;
+ struct qib_ibport *ibp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & QIB_S_TIMER) {
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ibp->n_rc_timeouts++;
+ qp->s_flags &= ~QIB_S_TIMER;
+ del_timer(&qp->s_timer);
+ qib_restart_rc(qp, qp->s_last_psn + 1, 1);
+ qib_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+/*
+ * This is called from s_timer for RNR timeouts.
+ */
+void qib_rc_rnr_retry(unsigned long arg)
+{
+ struct qib_qp *qp = (struct qib_qp *)arg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & QIB_S_WAIT_RNR) {
+ qp->s_flags &= ~QIB_S_WAIT_RNR;
+ del_timer(&qp->s_timer);
+ qib_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+/*
+ * Set qp->s_sending_psn to the next PSN after the given one.
+ * This would be psn+1 except when RDMA reads are present.
+ */
+static void reset_sending_psn(struct qib_qp *qp, u32 psn)
+{
+ struct qib_swqe *wqe;
+ u32 n = qp->s_last;
+
+ /* Find the work request corresponding to the given PSN. */
+ for (;;) {
+ wqe = get_swqe_ptr(qp, n);
+ if (qib_cmp24(psn, wqe->lpsn) <= 0) {
+ if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ qp->s_sending_psn = wqe->lpsn + 1;
+ else
+ qp->s_sending_psn = psn + 1;
+ break;
+ }
+ if (++n == qp->s_size)
+ n = 0;
+ if (n == qp->s_tail)
+ break;
+ }
+}
+
+/*
+ * This should be called with the QP s_lock held and interrupts disabled.
+ */
+void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
+{
+ struct qib_other_headers *ohdr;
+ struct qib_swqe *wqe;
+ struct ib_wc wc;
+ unsigned i;
+ u32 opcode;
+ u32 psn;
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+ return;
+
+ /* Find out where the BTH is */
+ if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else
+ ohdr = &hdr->u.l.oth;
+
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+ if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+ opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
+ WARN_ON(!qp->s_rdma_ack_cnt);
+ qp->s_rdma_ack_cnt--;
+ return;
+ }
+
+ psn = be32_to_cpu(ohdr->bth[2]);
+ reset_sending_psn(qp, psn);
+
+ /*
+ * Start timer after a packet requesting an ACK has been sent and
+ * there are still requests that haven't been acked.
+ */
+ if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
+ !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
+ start_timer(qp);
+
+ while (qp->s_last != qp->s_acked) {
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
+ qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
+ break;
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct qib_sge *sge = &wqe->sg_list[i];
+
+ atomic_dec(&sge->mr->refcount);
+ }
+ /* Post a send completion queue entry if requested. */
+ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
+ memset(&wc, 0, sizeof wc);
+ wc.wr_id = wqe->wr.wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
+ wc.byte_len = wqe->length;
+ wc.qp = &qp->ibqp;
+ qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ }
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ }
+ /*
+ * If we were waiting for sends to complete before resending,
+ * and they are now complete, restart sending.
+ */
+ if (qp->s_flags & QIB_S_WAIT_PSN &&
+ qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+ qp->s_flags &= ~QIB_S_WAIT_PSN;
+ qp->s_sending_psn = qp->s_psn;
+ qp->s_sending_hpsn = qp->s_psn - 1;
+ qib_schedule_send(qp);
+ }
+}
+
+static inline void update_last_psn(struct qib_qp *qp, u32 psn)
+{
+ qp->s_last_psn = psn;
+}
+
+/*
+ * Generate a SWQE completion.
+ * This is similar to qib_send_complete but has to check to be sure
+ * that the SGEs are not being referenced if the SWQE is being resent.
+ */
+static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
+ struct qib_swqe *wqe,
+ struct qib_ibport *ibp)
+{
+ struct ib_wc wc;
+ unsigned i;
+
+ /*
+ * Don't decrement refcount and don't generate a
+ * completion if the SWQE is being resent until the send
+ * is finished.
+ */
+ if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
+ qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct qib_sge *sge = &wqe->sg_list[i];
+
+ atomic_dec(&sge->mr->refcount);
+ }
+ /* Post a send completion queue entry if requested. */
+ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
+ memset(&wc, 0, sizeof wc);
+ wc.wr_id = wqe->wr.wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
+ wc.byte_len = wqe->length;
+ wc.qp = &qp->ibqp;
+ qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ }
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ } else
+ ibp->n_rc_delayed_comp++;
+
+ qp->s_retry = qp->s_retry_cnt;
+ update_last_psn(qp, wqe->lpsn);
+
+ /*
+ * If we are completing a request which is in the process of
+ * being resent, we can stop resending it since we know the
+ * responder has already seen it.
+ */
+ if (qp->s_acked == qp->s_cur) {
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ qp->s_acked = qp->s_cur;
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ if (qp->s_acked != qp->s_tail) {
+ qp->s_state = OP(SEND_LAST);
+ qp->s_psn = wqe->psn;
+ }
+ } else {
+ if (++qp->s_acked >= qp->s_size)
+ qp->s_acked = 0;
+ if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
+ qp->s_draining = 0;
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ }
+ return wqe;
+}
+
+/**
+ * do_rc_ack - process an incoming RC ACK
+ * @qp: the QP the ACK came in on
+ * @psn: the packet sequence number of the ACK
+ * @opcode: the opcode of the request that resulted in the ACK
+ *
+ * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
+ * for the given QP.
+ * Called at interrupt level with the QP s_lock held.
+ * Returns 1 if OK, 0 if current operation should be aborted (NAK).
+ */
+static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
+ u64 val, struct qib_ctxtdata *rcd)
+{
+ struct qib_ibport *ibp;
+ enum ib_wc_status status;
+ struct qib_swqe *wqe;
+ int ret = 0;
+ u32 ack_psn;
+ int diff;
+
+ /* Remove QP from retry timer */
+ if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
+ qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+ del_timer(&qp->s_timer);
+ }
+
+ /*
+ * Note that NAKs implicitly ACK outstanding SEND and RDMA write
+ * requests and implicitly NAK RDMA read and atomic requests issued
+ * before the NAK'ed request. The MSN won't include the NAK'ed
+ * request but will include any ACK'ed requests.
+ */
+ ack_psn = psn;
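+ /*
+ * A non-zero AETH code (bits 31:29) is a NAK, so the NAK'ed PSN
+ * itself has not been ACK'ed.
+ */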
+ if (aeth >> 29)
+ ack_psn--;
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+
+ /*
+ * The MSN might be for a later WQE than the PSN indicates so
+ * only complete WQEs that the PSN finishes.
+ */
+ while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
+ /*
+ * RDMA_READ_RESPONSE_ONLY is a special case since
+ * we want to generate completion events for everything
+ * before the RDMA read, copy the data, then generate
+ * the completion for the read.
+ */
+ if (wqe->wr.opcode == IB_WR_RDMA_READ &&
+ opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
+ diff == 0) {
+ ret = 1;
+ goto bail;
+ }
+ /*
+ * If this request is a RDMA read or atomic, and the ACK is
+ * for a later operation, this ACK NAKs the RDMA read or
+ * atomic. In other words, only a RDMA_READ_LAST or ONLY
+ * can ACK a RDMA read and likewise for atomic ops. Note
+ * that the NAK case can only happen if relaxed ordering is
+ * used and requests are sent after an RDMA read or atomic
+ * is sent but before the response is received.
+ */
+ if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
+ (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
+ ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
+ (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
+ /* Retry this request. */
+ if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) {
+ qp->r_flags |= QIB_R_RDMAR_SEQ;
+ qib_restart_rc(qp, qp->s_last_psn + 1, 0);
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_SEND;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait,
+ &rcd->qp_wait_list);
+ }
+ }
+ /*
+ * No need to process the ACK/NAK since we are
+ * restarting an earlier request.
+ */
+ goto bail;
+ }
+ if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ u64 *vaddr = wqe->sg_list[0].vaddr;
+ *vaddr = val;
+ }
+ if (qp->s_num_rd_atomic &&
+ (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
+ qp->s_num_rd_atomic--;
+ /* Restart sending task if fence is complete */
+ if ((qp->s_flags & QIB_S_WAIT_FENCE) &&
+ !qp->s_num_rd_atomic) {
+ qp->s_flags &= ~(QIB_S_WAIT_FENCE |
+ QIB_S_WAIT_ACK);
+ qib_schedule_send(qp);
+ } else if (qp->s_flags & QIB_S_WAIT_RDMAR) {
+ qp->s_flags &= ~(QIB_S_WAIT_RDMAR |
+ QIB_S_WAIT_ACK);
+ qib_schedule_send(qp);
+ }
+ }
+ wqe = do_rc_completion(qp, wqe, ibp);
+ if (qp->s_acked == qp->s_tail)
+ break;
+ }
+
+ switch (aeth >> 29) {
+ case 0: /* ACK */
+ ibp->n_rc_acks++;
+ if (qp->s_acked != qp->s_tail) {
+ /*
+ * We are expecting more ACKs so
+ * reset the retransmit timer.
+ */
+ start_timer(qp);
+ /*
+ * We can stop resending the earlier packets and
+ * continue with the next packet the receiver wants.
+ */
+ if (qib_cmp24(qp->s_psn, psn) <= 0)
+ reset_psn(qp, psn + 1);
+ } else if (qib_cmp24(qp->s_psn, psn) <= 0) {
+ qp->s_state = OP(SEND_LAST);
+ qp->s_psn = psn + 1;
+ }
+ if (qp->s_flags & QIB_S_WAIT_ACK) {
+ qp->s_flags &= ~QIB_S_WAIT_ACK;
+ qib_schedule_send(qp);
+ }
+ qib_get_credit(qp, aeth);
+ qp->s_rnr_retry = qp->s_rnr_retry_cnt;
+ qp->s_retry = qp->s_retry_cnt;
+ update_last_psn(qp, psn);
+ ret = 1;
+ goto bail;
+
+ case 1: /* RNR NAK */
+ ibp->n_rnr_naks++;
+ if (qp->s_acked == qp->s_tail)
+ goto bail;
+ if (qp->s_flags & QIB_S_WAIT_RNR)
+ goto bail;
+ if (qp->s_rnr_retry == 0) {
+ status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto class_b;
+ }
+ if (qp->s_rnr_retry_cnt < 7)
+ qp->s_rnr_retry--;
+
+ /* The last valid PSN is the previous PSN. */
+ update_last_psn(qp, psn - 1);
+
+ ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+
+ reset_psn(qp, psn);
+
+ qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK);
+ qp->s_flags |= QIB_S_WAIT_RNR;
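+ /* Arm the RNR timer using the delay encoded in the AETH credit field. */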
+ qp->s_timer.function = qib_rc_rnr_retry;
+ qp->s_timer.expires = jiffies + usecs_to_jiffies(
+ ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
+ QIB_AETH_CREDIT_MASK]);
+ add_timer(&qp->s_timer);
+ goto bail;
+
+ case 3: /* NAK */
+ if (qp->s_acked == qp->s_tail)
+ goto bail;
+ /* The last valid PSN is the previous PSN. */
+ update_last_psn(qp, psn - 1);
+ switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
+ QIB_AETH_CREDIT_MASK) {
+ case 0: /* PSN sequence error */
+ ibp->n_seq_naks++;
+ /*
+ * Back up to the responder's expected PSN.
+ * Note that we might get a NAK in the middle of an
+ * RDMA READ response which terminates the RDMA
+ * READ.
+ */
+ qib_restart_rc(qp, psn, 0);
+ qib_schedule_send(qp);
+ break;
+
+ case 1: /* Invalid Request */
+ status = IB_WC_REM_INV_REQ_ERR;
+ ibp->n_other_naks++;
+ goto class_b;
+
+ case 2: /* Remote Access Error */
+ status = IB_WC_REM_ACCESS_ERR;
+ ibp->n_other_naks++;
+ goto class_b;
+
+ case 3: /* Remote Operation Error */
+ status = IB_WC_REM_OP_ERR;
+ ibp->n_other_naks++;
+class_b:
+ if (qp->s_last == qp->s_acked) {
+ qib_send_complete(qp, wqe, status);
+ qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ }
+ break;
+
+ default:
+ /* Ignore other reserved NAK error codes */
+ goto reserved;
+ }
+ qp->s_retry = qp->s_retry_cnt;
+ qp->s_rnr_retry = qp->s_rnr_retry_cnt;
+ goto bail;
+
+ default: /* 2: reserved */
+reserved:
+ /* Ignore reserved NAK codes. */
+ goto bail;
+ }
+
+bail:
+ return ret;
+}
+
+/*
+ * We have seen an out of sequence RDMA read middle or last packet.
+ * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
+ */
+static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
+ struct qib_ctxtdata *rcd)
+{
+ struct qib_swqe *wqe;
+
+ /* Remove QP from retry timer */
+ if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
+ qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+ del_timer(&qp->s_timer);
+ }
+
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+
+ while (qib_cmp24(psn, wqe->lpsn) > 0) {
+ if (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+ break;
+ wqe = do_rc_completion(qp, wqe, ibp);
+ }
+
+ ibp->n_rdma_seq++;
+ qp->r_flags |= QIB_R_RDMAR_SEQ;
+ qib_restart_rc(qp, qp->s_last_psn + 1, 0);
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_SEND;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+}
+
+/**
+ * qib_rc_rcv_resp - process an incoming RC response packet
+ * @ibp: the port this packet came in on
+ * @ohdr: the other headers for this packet
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP for this packet
+ * @opcode: the opcode for this packet
+ * @psn: the packet sequence number for this packet
+ * @hdrsize: the header length
+ * @pmtu: the path MTU
+ *
+ * This is called from qib_rc_rcv() to process an incoming RC response
+ * packet for the given QP.
+ * Called at interrupt level.
+ */
+static void qib_rc_rcv_resp(struct qib_ibport *ibp,
+ struct qib_other_headers *ohdr,
+ void *data, u32 tlen,
+ struct qib_qp *qp,
+ u32 opcode,
+ u32 psn, u32 hdrsize, u32 pmtu,
+ struct qib_ctxtdata *rcd)
+{
+ struct qib_swqe *wqe;
+ enum ib_wc_status status;
+ unsigned long flags;
+ int diff;
+ u32 pad;
+ u32 aeth;
+ u64 val;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Double check we can process this now that we hold the s_lock. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto ack_done;
+
+ /* Ignore invalid responses. */
+ if (qib_cmp24(psn, qp->s_next_psn) >= 0)
+ goto ack_done;
+
+ /* Ignore duplicate responses. */
+ diff = qib_cmp24(psn, qp->s_last_psn);
+ if (unlikely(diff <= 0)) {
+ /* Update credits for "ghost" ACKs */
+ if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ if ((aeth >> 29) == 0)
+ qib_get_credit(qp, aeth);
+ }
+ goto ack_done;
+ }
+
+ /*
+ * Skip everything other than the PSN we expect, if we are waiting
+ * for a reply to a restarted RDMA read or atomic op.
+ */
+ if (qp->r_flags & QIB_R_RDMAR_SEQ) {
+ if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
+ goto ack_done;
+ qp->r_flags &= ~QIB_R_RDMAR_SEQ;
+ }
+
+ if (unlikely(qp->s_acked == qp->s_tail))
+ goto ack_done;
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ status = IB_WC_SUCCESS;
+
+ switch (opcode) {
+ case OP(ACKNOWLEDGE):
+ case OP(ATOMIC_ACKNOWLEDGE):
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
+ __be32 *p = ohdr->u.at.atomic_ack_eth;
+
+ val = ((u64) be32_to_cpu(p[0]) << 32) |
+ be32_to_cpu(p[1]);
+ } else
+ val = 0;
+ if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
+ opcode != OP(RDMA_READ_RESPONSE_FIRST))
+ goto ack_done;
+ hdrsize += 4;
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+ goto ack_op_err;
+ /*
+ * If this is a response to a resent RDMA read, we
+ * have to be careful to copy the data to the right
+ * location.
+ */
+ qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
+ wqe, psn, pmtu);
+ goto read_middle;
+
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ /* no AETH, no ACK */
+ if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
+ goto ack_seq_err;
+ if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+ goto ack_op_err;
+read_middle:
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto ack_len_err;
+ if (unlikely(pmtu >= qp->s_rdma_read_len))
+ goto ack_len_err;
+
+ /*
+ * We got a response so update the timeout.
+ * 4.096 usec. * (1 << qp->timeout)
+ */
+ qp->s_flags |= QIB_S_TIMER;
+ mod_timer(&qp->s_timer, jiffies +
+ usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
+ 1000UL));
+ if (qp->s_flags & QIB_S_WAIT_ACK) {
+ qp->s_flags &= ~QIB_S_WAIT_ACK;
+ qib_schedule_send(qp);
+ }
+
+ if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
+ qp->s_retry = qp->s_retry_cnt;
+
+ /*
+ * Update the RDMA receive state but do the copy w/o
+ * holding the locks and blocking interrupts.
+ */
+ qp->s_rdma_read_len -= pmtu;
+ update_last_psn(qp, psn);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
+ goto bail;
+
+ case OP(RDMA_READ_RESPONSE_ONLY):
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
+ goto ack_done;
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /*
+ * Check that the data size is >= 0 && <= pmtu.
+ * Remember to account for the AETH header (4) and
+ * ICRC (4).
+ */
+ if (unlikely(tlen < (hdrsize + pad + 8)))
+ goto ack_len_err;
+ /*
+ * If this is a response to a resent RDMA read, we
+ * have to be careful to copy the data to the right
+ * location.
+ */
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
+ wqe, psn, pmtu);
+ goto read_last;
+
+ case OP(RDMA_READ_RESPONSE_LAST):
+ /* ACKs READ req. */
+ if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
+ goto ack_seq_err;
+ if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+ goto ack_op_err;
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /*
+ * Check that the data size is >= 1 && <= pmtu.
+ * Remember to account for the AETH header (4) and
+ * ICRC (4).
+ */
+ if (unlikely(tlen <= (hdrsize + pad + 8)))
+ goto ack_len_err;
+read_last:
+ tlen -= hdrsize + pad + 8;
+ if (unlikely(tlen != qp->s_rdma_read_len))
+ goto ack_len_err;
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
+ WARN_ON(qp->s_rdma_read_sge.num_sge);
+ (void) do_rc_ack(qp, aeth, psn,
+ OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
+ goto ack_done;
+ }
+
+ack_op_err:
+ status = IB_WC_LOC_QP_OP_ERR;
+ goto ack_err;
+
+ack_seq_err:
+ rdma_seq_err(qp, ibp, psn, rcd);
+ goto ack_done;
+
+ack_len_err:
+ status = IB_WC_LOC_LEN_ERR;
+ack_err:
+ if (qp->s_last == qp->s_acked) {
+ qib_send_complete(qp, wqe, status);
+ qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ }
+ack_done:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+bail:
+ return;
+}
+
+/**
+ * qib_rc_rcv_error - process an incoming duplicate or error RC packet
+ * @ohdr: the other headers for this packet
+ * @data: the packet data
+ * @qp: the QP for this packet
+ * @opcode: the opcode for this packet
+ * @psn: the packet sequence number for this packet
+ * @diff: the difference between the PSN and the expected PSN
+ *
+ * This is called from qib_rc_rcv() to process an unexpected
+ * incoming RC packet for the given QP.
+ * Called at interrupt level.
+ * Return 1 if no more processing is needed; otherwise return 0 to
+ * schedule a response to be sent.
+ */
+static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
+ void *data,
+ struct qib_qp *qp,
+ u32 opcode,
+ u32 psn,
+ int diff,
+ struct qib_ctxtdata *rcd)
+{
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_ack_entry *e;
+ unsigned long flags;
+ u8 i, prev;
+ int old_req;
+
+ if (diff > 0) {
+ /*
+ * Packet sequence error.
+ * A NAK will ACK earlier sends and RDMA writes.
+ * Don't queue the NAK if we already sent one.
+ */
+ if (!qp->r_nak_state) {
+ ibp->n_rc_seqnak++;
+ qp->r_nak_state = IB_NAK_PSN_ERROR;
+ /* Use the expected PSN. */
+ qp->r_ack_psn = qp->r_psn;
+ /*
+ * Wait to send the sequence NAK until all packets
+ * in the receive queue have been processed.
+ * Otherwise, we end up propagating congestion.
+ */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_NAK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ }
+ goto done;
+ }
+
+ /*
+ * Handle a duplicate request. Don't re-execute SEND, RDMA
+ * write or atomic op. Don't NAK errors, just silently drop
+ * the duplicate request. Note that r_sge, r_len, and
+ * r_rcv_len may be in use so don't modify them.
+ *
+ * We are supposed to ACK the earliest duplicate PSN but we
+ * can coalesce an outstanding duplicate ACK. We have to
+ * send the earliest so that RDMA reads can be restarted at
+ * the requester's expected PSN.
+ *
+ * First, find where this duplicate PSN falls within the
+ * ACKs previously sent.
+ * old_req is true if there is an older response that is scheduled
+ * to be sent before sending this one.
+ */
+ e = NULL;
+ old_req = 1;
+ ibp->n_rc_dupreq++;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ /* Double check we can process this now that we hold the s_lock. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto unlock_done;
+
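+ /*
+ * Scan the ack queue backwards from the newest entry for the
+ * slot whose PSN range covers this duplicate request.
+ */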
+ for (i = qp->r_head_ack_queue; ; i = prev) {
+ if (i == qp->s_tail_ack_queue)
+ old_req = 0;
+ if (i)
+ prev = i - 1;
+ else
+ prev = QIB_MAX_RDMA_ATOMIC;
+ if (prev == qp->r_head_ack_queue) {
+ e = NULL;
+ break;
+ }
+ e = &qp->s_ack_queue[prev];
+ if (!e->opcode) {
+ e = NULL;
+ break;
+ }
+ if (qib_cmp24(psn, e->psn) >= 0) {
+ if (prev == qp->s_tail_ack_queue &&
+ qib_cmp24(psn, e->lpsn) <= 0)
+ old_req = 0;
+ break;
+ }
+ }
+ switch (opcode) {
+ case OP(RDMA_READ_REQUEST): {
+ struct ib_reth *reth;
+ u32 offset;
+ u32 len;
+
+ /*
+ * If we didn't find the RDMA read request in the ack queue,
+ * we can ignore this request.
+ */
+ if (!e || e->opcode != OP(RDMA_READ_REQUEST))
+ goto unlock_done;
+ /* RETH comes after BTH */
+ reth = &ohdr->u.rc.reth;
+ /*
+ * Address range must be a subset of the original
+ * request and start on pmtu boundaries.
+ * We reuse the old ack_queue slot since the requester
+ * should not back up and request an earlier PSN for the
+ * same request.
+ */
+ offset = ((psn - e->psn) & QIB_PSN_MASK) *
+ ib_mtu_enum_to_int(qp->path_mtu);
+ len = be32_to_cpu(reth->length);
+ if (unlikely(offset + len != e->rdma_sge.sge_length))
+ goto unlock_done;
+ if (e->rdma_sge.mr) {
+ atomic_dec(&e->rdma_sge.mr->refcount);
+ e->rdma_sge.mr = NULL;
+ }
+ if (len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
+ IB_ACCESS_REMOTE_READ);
+ if (unlikely(!ok))
+ goto unlock_done;
+ } else {
+ e->rdma_sge.vaddr = NULL;
+ e->rdma_sge.length = 0;
+ e->rdma_sge.sge_length = 0;
+ }
+ e->psn = psn;
+ if (old_req)
+ goto unlock_done;
+ qp->s_tail_ack_queue = prev;
+ break;
+ }
+
+ case OP(COMPARE_SWAP):
+ case OP(FETCH_ADD): {
+ /*
+ * If we didn't find the atomic request in the ack queue
+ * or the send tasklet is already backed up to send an
+ * earlier entry, we can ignore this request.
+ */
+ if (!e || e->opcode != (u8) opcode || old_req)
+ goto unlock_done;
+ qp->s_tail_ack_queue = prev;
+ break;
+ }
+
+ default:
+ /*
+ * Ignore this operation if it doesn't request an ACK
+ * or an earlier RDMA read or atomic is going to be resent.
+ */
+ if (!(psn & IB_BTH_REQ_ACK) || old_req)
+ goto unlock_done;
+ /*
+ * Resend the most recent ACK if this request is
+ * after all the previous RDMA reads and atomics.
+ */
+ if (i == qp->r_head_ack_queue) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ qp->r_nak_state = 0;
+ qp->r_ack_psn = qp->r_psn - 1;
+ goto send_ack;
+ }
+ /*
+ * Try to send a simple ACK to work around a Mellanox bug
+ * which doesn't accept a RDMA read response or atomic
+ * response as an ACK for earlier SENDs or RDMA writes.
+ */
+ if (!(qp->s_flags & QIB_S_RESP_PENDING)) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ qp->r_nak_state = 0;
+ qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
+ goto send_ack;
+ }
+ /*
+ * Resend the RDMA read or atomic op which
+ * ACKs this duplicate request.
+ */
+ qp->s_tail_ack_queue = i;
+ break;
+ }
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+ qp->s_flags |= QIB_S_RESP_PENDING;
+ qp->r_nak_state = 0;
+ qib_schedule_send(qp);
+
+unlock_done:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+done:
+ return 1;
+
+send_ack:
+ return 0;
+}
+
+void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err)
+{
+ unsigned long flags;
+ int lastwqe;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ lastwqe = qib_error_qp(qp, err);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ if (lastwqe) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+}
+
+static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n)
+{
+ unsigned next;
+
+ next = n + 1;
+ if (next > QIB_MAX_RDMA_ATOMIC)
+ next = 0;
+ qp->s_tail_ack_queue = next;
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+}
+
+/**
+ * qib_rc_rcv - process an incoming RC packet
+ * @rcd: the context pointer
+ * @hdr: the header of this packet
+ * @has_grh: true if the header has a GRH
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP for this packet
+ *
+ * This is called from qib_qp_rcv() to process an incoming RC packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+{
+ struct qib_ibport *ibp = &rcd->ppd->ibport_data;
+ struct qib_other_headers *ohdr;
+ u32 opcode;
+ u32 hdrsize;
+ u32 psn;
+ u32 pad;
+ struct ib_wc wc;
+ u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ int diff;
+ struct ib_reth *reth;
+ unsigned long flags;
+ int ret;
+
+ /* Check for GRH */
+ if (!has_grh) {
+ ohdr = &hdr->u.oth;
+ hdrsize = 8 + 12; /* LRH + BTH */
+ } else {
+ ohdr = &hdr->u.l.oth;
+ hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
+ }
+
+ opcode = be32_to_cpu(ohdr->bth[0]);
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
+ goto sunlock;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ psn = be32_to_cpu(ohdr->bth[2]);
+ opcode >>= 24;
+
+ /* Prevent simultaneous processing after APM on different CPUs */
+ spin_lock(&qp->r_lock);
+
+ /*
+ * Process responses (ACKs) before anything else. Note that the
+ * packet sequence number will be for something in the send work
+ * queue rather than the expected receive packet sequence number.
+ * In other words, this QP is the requester.
+ */
+ if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+ opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
+ qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
+ hdrsize, pmtu, rcd);
+ goto runlock;
+ }
+
+ /* Compute 24 bits worth of difference. */
+ diff = qib_cmp24(psn, qp->r_psn);
+ if (unlikely(diff)) {
+ if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
+ goto runlock;
+ goto send_ack;
+ }
+
+ /* Check for opcode sequence errors. */
+ switch (qp->r_state) {
+ case OP(SEND_FIRST):
+ case OP(SEND_MIDDLE):
+ if (opcode == OP(SEND_MIDDLE) ||
+ opcode == OP(SEND_LAST) ||
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE))
+ break;
+ goto nack_inv;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_MIDDLE):
+ if (opcode == OP(RDMA_WRITE_MIDDLE) ||
+ opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+ break;
+ goto nack_inv;
+
+ default:
+ if (opcode == OP(SEND_MIDDLE) ||
+ opcode == OP(SEND_LAST) ||
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
+ opcode == OP(RDMA_WRITE_MIDDLE) ||
+ opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+ goto nack_inv;
+ /*
+ * Note that it is up to the requester to not send a new
+ * RDMA read or atomic operation before receiving an ACK
+ * for the previous operation.
+ */
+ break;
+ }
+
+ memset(&wc, 0, sizeof wc);
+
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
+ qp->r_flags |= QIB_R_COMM_EST;
+ if (qp->ibqp.event_handler) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_COMM_EST;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ }
+
+ /* OK, process the packet. */
+ switch (opcode) {
+ case OP(SEND_FIRST):
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ qp->r_rcv_len = 0;
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ case OP(RDMA_WRITE_MIDDLE):
+send_middle:
+ /* Check for invalid length PMTU or posted rwqe len. */
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto nack_inv;
+ qp->r_rcv_len += pmtu;
+ if (unlikely(qp->r_rcv_len > qp->r_len))
+ goto nack_inv;
+ qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+ break;
+
+ case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
+ /* consume RWQE */
+ ret = qib_get_rwqe(qp, 1);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ goto send_last_imm;
+
+ case OP(SEND_ONLY):
+ case OP(SEND_ONLY_WITH_IMMEDIATE):
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ qp->r_rcv_len = 0;
+ if (opcode == OP(SEND_ONLY))
+ goto send_last;
+ /* FALLTHROUGH */
+ case OP(SEND_LAST_WITH_IMMEDIATE):
+send_last_imm:
+ wc.ex.imm_data = ohdr->u.imm_data;
+ hdrsize += 4;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ /* FALLTHROUGH */
+ case OP(SEND_LAST):
+ case OP(RDMA_WRITE_LAST):
+send_last:
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* XXX LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto nack_inv;
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ wc.byte_len = tlen + qp->r_rcv_len;
+ if (unlikely(wc.byte_len > qp->r_len))
+ goto nack_inv;
+ qib_copy_sge(&qp->r_sge, data, tlen, 1);
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+ qp->r_msn++;
+ if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ break;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
+ opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ else
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = qp->remote_qpn;
+ wc.slid = qp->remote_ah_attr.dlid;
+ wc.sl = qp->remote_ah_attr.sl;
+ /* Signal completion event if the solicited bit is set. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ (ohdr->bth[0] &
+ cpu_to_be32(IB_BTH_SOLICITED)) != 0);
+ break;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_ONLY):
+ case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto nack_inv;
+ /* consume RWQE */
+ reth = &ohdr->u.rc.reth;
+ hdrsize += sizeof(*reth);
+ qp->r_len = be32_to_cpu(reth->length);
+ qp->r_rcv_len = 0;
+ qp->r_sge.sg_list = NULL;
+ if (qp->r_len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ /* Check rkey & NAK */
+ ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
+ rkey, IB_ACCESS_REMOTE_WRITE);
+ if (unlikely(!ok))
+ goto nack_acc;
+ qp->r_sge.num_sge = 1;
+ } else {
+ qp->r_sge.num_sge = 0;
+ qp->r_sge.sge.mr = NULL;
+ qp->r_sge.sge.vaddr = NULL;
+ qp->r_sge.sge.length = 0;
+ qp->r_sge.sge.sge_length = 0;
+ }
+ if (opcode == OP(RDMA_WRITE_FIRST))
+ goto send_middle;
+ else if (opcode == OP(RDMA_WRITE_ONLY))
+ goto send_last;
+ ret = qib_get_rwqe(qp, 1);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ goto send_last_imm;
+
+ case OP(RDMA_READ_REQUEST): {
+ struct qib_ack_entry *e;
+ u32 len;
+ u8 next;
+
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+ goto nack_inv;
+ next = qp->r_head_ack_queue + 1;
+ /* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
+ if (next > QIB_MAX_RDMA_ATOMIC)
+ next = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ /* Double check we can process this while holding the s_lock. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto srunlock;
+ if (unlikely(next == qp->s_tail_ack_queue)) {
+ if (!qp->s_ack_queue[next].sent)
+ goto nack_inv_unlck;
+ qib_update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ atomic_dec(&e->rdma_sge.mr->refcount);
+ e->rdma_sge.mr = NULL;
+ }
+ reth = &ohdr->u.rc.reth;
+ len = be32_to_cpu(reth->length);
+ if (len) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ /* Check rkey & NAK */
+ ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr,
+ rkey, IB_ACCESS_REMOTE_READ);
+ if (unlikely(!ok))
+ goto nack_acc_unlck;
+ /*
+ * Update the next expected PSN. We add 1 later
+ * below, so only add the remainder here.
+ */
+ if (len > pmtu)
+ qp->r_psn += (len - 1) / pmtu;
+ } else {
+ e->rdma_sge.mr = NULL;
+ e->rdma_sge.vaddr = NULL;
+ e->rdma_sge.length = 0;
+ e->rdma_sge.sge_length = 0;
+ }
+ e->opcode = opcode;
+ e->sent = 0;
+ e->psn = psn;
+ e->lpsn = qp->r_psn;
+ /*
+ * We need to increment the MSN here instead of when we
+ * finish sending the result since a duplicate request would
+ * increment it more than once.
+ */
+ qp->r_msn++;
+ qp->r_psn++;
+ qp->r_state = opcode;
+ qp->r_nak_state = 0;
+ qp->r_head_ack_queue = next;
+
+ /* Schedule the send tasklet. */
+ qp->s_flags |= QIB_S_RESP_PENDING;
+ qib_schedule_send(qp);
+
+ goto srunlock;
+ }
+
+ case OP(COMPARE_SWAP):
+ case OP(FETCH_ADD): {
+ struct ib_atomic_eth *ateth;
+ struct qib_ack_entry *e;
+ u64 vaddr;
+ atomic64_t *maddr;
+ u64 sdata;
+ u32 rkey;
+ u8 next;
+
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+ goto nack_inv;
+ next = qp->r_head_ack_queue + 1;
+ if (next > QIB_MAX_RDMA_ATOMIC)
+ next = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ /* Double check we can process this while holding the s_lock. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto srunlock;
+ if (unlikely(next == qp->s_tail_ack_queue)) {
+ if (!qp->s_ack_queue[next].sent)
+ goto nack_inv_unlck;
+ qib_update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ atomic_dec(&e->rdma_sge.mr->refcount);
+ e->rdma_sge.mr = NULL;
+ }
+ ateth = &ohdr->u.atomic_eth;
+ vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
+ be32_to_cpu(ateth->vaddr[1]);
+ if (unlikely(vaddr & (sizeof(u64) - 1)))
+ goto nack_inv_unlck;
+ rkey = be32_to_cpu(ateth->rkey);
+ /* Check rkey & NAK */
+ if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ vaddr, rkey,
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto nack_acc_unlck;
+ /* Perform atomic OP and save result. */
+ maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+ sdata = be64_to_cpu(ateth->swap_data);
+ e->atomic_data = (opcode == OP(FETCH_ADD)) ?
+ (u64) atomic64_add_return(sdata, maddr) - sdata :
+ (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ be64_to_cpu(ateth->compare_data),
+ sdata);
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ qp->r_sge.num_sge = 0;
+ e->opcode = opcode;
+ e->sent = 0;
+ e->psn = psn;
+ e->lpsn = psn;
+ qp->r_msn++;
+ qp->r_psn++;
+ qp->r_state = opcode;
+ qp->r_nak_state = 0;
+ qp->r_head_ack_queue = next;
+
+ /* Schedule the send tasklet. */
+ qp->s_flags |= QIB_S_RESP_PENDING;
+ qib_schedule_send(qp);
+
+ goto srunlock;
+ }
+
+ default:
+ /* NAK unknown opcodes. */
+ goto nack_inv;
+ }
+ qp->r_psn++;
+ qp->r_state = opcode;
+ qp->r_ack_psn = psn;
+ qp->r_nak_state = 0;
+ /* Send an ACK if requested or required. */
+ if (psn & (1 << 31))
+ goto send_ack;
+ goto runlock;
+
+rnr_nak:
+ qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue RNR NAK for later */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_NAK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ goto runlock;
+
+nack_op_err:
+ qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue NAK for later */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_NAK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ goto runlock;
+
+nack_inv_unlck:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+nack_inv:
+ qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ qp->r_nak_state = IB_NAK_INVALID_REQUEST;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue NAK for later */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_NAK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ goto runlock;
+
+nack_acc_unlck:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+nack_acc:
+ qib_rc_error(qp, IB_WC_LOC_PROT_ERR);
+ qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+send_ack:
+ qib_send_rc_ack(qp);
+runlock:
+ spin_unlock(&qp->r_lock);
+ return;
+
+srunlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ spin_unlock(&qp->r_lock);
+ return;
+
+sunlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
new file mode 100644
index 000000000000..eb78d9367f06
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -0,0 +1,817 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+
+#include "qib.h"
+#include "qib_mad.h"
+
+/*
+ * Convert the AETH RNR timeout code into the number of microseconds.
+ */
+const u32 ib_qib_rnr_table[32] = {
+ 655360, /* 00: 655.36 */
+ 10, /* 01: .01 */
+ 20, /* 02: .02 */
+ 30, /* 03: .03 */
+ 40, /* 04: .04 */
+ 60, /* 05: .06 */
+ 80, /* 06: .08 */
+ 120, /* 07: .12 */
+ 160, /* 08: .16 */
+ 240, /* 09: .24 */
+ 320, /* 0A: .32 */
+ 480, /* 0B: .48 */
+ 640, /* 0C: .64 */
+ 960, /* 0D: .96 */
+ 1280, /* 0E: 1.28 */
+ 1920, /* 0F: 1.92 */
+ 2560, /* 10: 2.56 */
+ 3840, /* 11: 3.84 */
+ 5120, /* 12: 5.12 */
+ 7680, /* 13: 7.68 */
+ 10240, /* 14: 10.24 */
+ 15360, /* 15: 15.36 */
+ 20480, /* 16: 20.48 */
+ 30720, /* 17: 30.72 */
+ 40960, /* 18: 40.96 */
+ 61440, /* 19: 61.44 */
+ 81920, /* 1A: 81.92 */
+ 122880, /* 1B: 122.88 */
+ 163840, /* 1C: 163.84 */
+ 245760, /* 1D: 245.76 */
+ 327680, /* 1E: 327.68 */
+ 491520 /* 1F: 491.52 */
+};
+
+/*
+ * Validate a RWQE and fill in the SGE state.
+ * Return 1 if OK.
+ */
+static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
+{
+ int i, j, ret;
+ struct ib_wc wc;
+ struct qib_lkey_table *rkt;
+ struct qib_pd *pd;
+ struct qib_sge_state *ss;
+
+ rkt = &to_idev(qp->ibqp.device)->lk_table;
+ pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
+ ss = &qp->r_sge;
+ ss->sg_list = qp->r_sg_list;
+ qp->r_len = 0;
+ for (i = j = 0; i < wqe->num_sge; i++) {
+ if (wqe->sg_list[i].length == 0)
+ continue;
+ /* Check LKEY */
+ if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
+ &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
+ goto bad_lkey;
+ qp->r_len += wqe->sg_list[i].length;
+ j++;
+ }
+ ss->num_sge = j;
+ ss->total_len = qp->r_len;
+ ret = 1;
+ goto bail;
+
+bad_lkey:
+ while (j) {
+ struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
+
+ atomic_dec(&sge->mr->refcount);
+ }
+ ss->num_sge = 0;
+ memset(&wc, 0, sizeof(wc));
+ wc.wr_id = wqe->wr_id;
+ wc.status = IB_WC_LOC_PROT_ERR;
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ /* Signal solicited completion event. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ ret = 0;
+bail:
+ return ret;
+}
+
+/**
+ * qib_get_rwqe - copy the next RWQE into the QP's RWQE
+ * @qp: the QP
+ * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
+ *
+ * Return -1 if there is a local error, 0 if no RWQE is available,
+ * otherwise return 1.
+ *
+ * Can be called from interrupt level.
+ */
+int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
+{
+ unsigned long flags;
+ struct qib_rq *rq;
+ struct qib_rwq *wq;
+ struct qib_srq *srq;
+ struct qib_rwqe *wqe;
+ void (*handler)(struct ib_event *, void *);
+ u32 tail;
+ int ret;
+
+ if (qp->ibqp.srq) {
+ srq = to_isrq(qp->ibqp.srq);
+ handler = srq->ibsrq.event_handler;
+ rq = &srq->rq;
+ } else {
+ srq = NULL;
+ handler = NULL;
+ rq = &qp->r_rq;
+ }
+
+ spin_lock_irqsave(&rq->lock, flags);
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
+ ret = 0;
+ goto unlock;
+ }
+
+ wq = rq->wq;
+ tail = wq->tail;
+ /* Validate tail before using it since it is user writable. */
+ if (tail >= rq->size)
+ tail = 0;
+ if (unlikely(tail == wq->head)) {
+ ret = 0;
+ goto unlock;
+ }
+ /* Make sure entry is read after head index is read. */
+ smp_rmb();
+ wqe = get_rwqe_ptr(rq, tail);
+ /*
+ * Even though we update the tail index in memory, the verbs
+ * consumer is not supposed to post more entries until a
+ * completion is generated.
+ */
+ if (++tail >= rq->size)
+ tail = 0;
+ wq->tail = tail;
+ if (!wr_id_only && !qib_init_sge(qp, wqe)) {
+ ret = -1;
+ goto unlock;
+ }
+ qp->r_wr_id = wqe->wr_id;
+
+ ret = 1;
+ set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
+ if (handler) {
+ u32 n;
+
+ /*
+ * Validate head pointer value and compute
+ * the number of remaining WQEs.
+ */
+ n = wq->head;
+ if (n >= rq->size)
+ n = 0;
+ if (n < tail)
+ n += rq->size - tail;
+ else
+ n -= tail;
+ if (n < srq->limit) {
+ struct ib_event ev;
+
+ srq->limit = 0;
+ spin_unlock_irqrestore(&rq->lock, flags);
+ ev.device = qp->ibqp.device;
+ ev.element.srq = qp->ibqp.srq;
+ ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ handler(&ev, srq->ibsrq.srq_context);
+ goto bail;
+ }
+ }
+unlock:
+ spin_unlock_irqrestore(&rq->lock, flags);
+bail:
+ return ret;
+}
+
+/*
+ * Switch to alternate path.
+ * The QP s_lock should be held and interrupts disabled.
+ */
+void qib_migrate_qp(struct qib_qp *qp)
+{
+ struct ib_event ev;
+
+ qp->s_mig_state = IB_MIG_MIGRATED;
+ qp->remote_ah_attr = qp->alt_ah_attr;
+ qp->port_num = qp->alt_ah_attr.port_num;
+ qp->s_pkey_index = qp->s_alt_pkey_index;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_PATH_MIG;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+}
+
+static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
+{
+ if (!index) {
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+ return ppd->guid;
+ } else
+ return ibp->guids[index - 1];
+}
+
+static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
+{
+ return (gid->global.interface_id == id &&
+ (gid->global.subnet_prefix == gid_prefix ||
+ gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
+}
+
+/*
+ *
+ * This should be called with the QP s_lock held.
+ */
+int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, struct qib_qp *qp, u32 bth0)
+{
+ __be64 guid;
+
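+ /*
+ * With APM armed and the migrate bit set, validate against the
+ * alternate path and migrate; otherwise validate the primary path.
+ */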
+ if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
+ if (!has_grh) {
+ if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
+ goto err;
+ } else {
+ if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
+ goto err;
+ guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
+ if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+ goto err;
+ if (!gid_ok(&hdr->u.l.grh.sgid,
+ qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
+ qp->alt_ah_attr.grh.dgid.global.interface_id))
+ goto err;
+ }
+ if (!qib_pkey_ok((u16)bth0,
+ qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+ (u16)bth0,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+ 0, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
+ goto err;
+ }
+ /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
+ if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
+ ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
+ goto err;
+ qib_migrate_qp(qp);
+ } else {
+ if (!has_grh) {
+ if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ goto err;
+ } else {
+ if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
+ goto err;
+ guid = get_sguid(ibp,
+ qp->remote_ah_attr.grh.sgid_index);
+ if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+ goto err;
+ if (!gid_ok(&hdr->u.l.grh.sgid,
+ qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
+ qp->remote_ah_attr.grh.dgid.global.interface_id))
+ goto err;
+ }
+ if (!qib_pkey_ok((u16)bth0,
+ qib_get_pkey(ibp, qp->s_pkey_index))) {
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+ (u16)bth0,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+ 0, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
+ goto err;
+ }
+ /* Validate the SLID. See Ch. 9.6.1.5 */
+ if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
+ ppd_from_ibp(ibp)->port != qp->port_num)
+ goto err;
+ if (qp->s_mig_state == IB_MIG_REARM &&
+ !(bth0 & IB_BTH_MIG_REQ))
+ qp->s_mig_state = IB_MIG_ARMED;
+ }
+
+ return 0;
+
+err:
+ return 1;
+}
+
+/**
+ * qib_ruc_loopback - handle UC and RC loopback requests
+ * @sqp: the sending QP
+ *
+ * This is called from qib_do_send() to
+ * forward a WQE addressed to the same HCA.
+ * Note that although we are single threaded due to the tasklet, we still
+ * have to protect against post_send(). We don't have to worry about
+ * receive interrupts since this is a connected protocol and all packets
+ * will pass through here.
+ */
+static void qib_ruc_loopback(struct qib_qp *sqp)
+{
+ struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
+ struct qib_qp *qp;
+ struct qib_swqe *wqe;
+ struct qib_sge *sge;
+ unsigned long flags;
+ struct ib_wc wc;
+ u64 sdata;
+ atomic64_t *maddr;
+ enum ib_wc_status send_status;
+ int release;
+ int ret;
+
+ /*
+ * Note that we check the responder QP state after
+ * checking the requester's state.
+ */
+ qp = qib_lookup_qpn(ibp, sqp->remote_qpn);
+
+ spin_lock_irqsave(&sqp->s_lock, flags);
+
+ /* Return if we are already busy processing a work request. */
+ if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
+ !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+ goto unlock;
+
+ sqp->s_flags |= QIB_S_BUSY;
+
+again:
+ if (sqp->s_last == sqp->s_head)
+ goto clr_busy;
+ wqe = get_swqe_ptr(sqp, sqp->s_last);
+
+ /* Return if it is not OK to start a new work request. */
+ if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
+ goto clr_busy;
+ /* We are in the error state, flush the work request. */
+ send_status = IB_WC_WR_FLUSH_ERR;
+ goto flush_send;
+ }
+
+ /*
+ * We can rely on the entry not changing without the s_lock
+ * being held until we update s_last.
+ * We increment s_cur to indicate s_last is in progress.
+ */
+ if (sqp->s_last == sqp->s_cur) {
+ if (++sqp->s_cur >= sqp->s_size)
+ sqp->s_cur = 0;
+ }
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+
+ if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
+ qp->ibqp.qp_type != sqp->ibqp.qp_type) {
+ ibp->n_pkt_drops++;
+ /*
+ * For RC, the requester would time out and retry so
+ * shortcut the timeouts and just signal too many retries.
+ */
+ if (sqp->ibqp.qp_type == IB_QPT_RC)
+ send_status = IB_WC_RETRY_EXC_ERR;
+ else
+ send_status = IB_WC_SUCCESS;
+ goto serr;
+ }
+
+ memset(&wc, 0, sizeof wc);
+ send_status = IB_WC_SUCCESS;
+
+ release = 1;
+ sqp->s_sge.sge = wqe->sg_list[0];
+ sqp->s_sge.sg_list = wqe->sg_list + 1;
+ sqp->s_sge.num_sge = wqe->wr.num_sge;
+ sqp->s_len = wqe->length;
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ /* FALLTHROUGH */
+ case IB_WR_SEND:
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ ret = qib_get_rwqe(qp, 1);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
+ /* FALLTHROUGH */
+ case IB_WR_RDMA_WRITE:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
+ if (wqe->length == 0)
+ break;
+ if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
+ wqe->wr.wr.rdma.remote_addr,
+ wqe->wr.wr.rdma.rkey,
+ IB_ACCESS_REMOTE_WRITE)))
+ goto acc_err;
+ qp->r_sge.sg_list = NULL;
+ qp->r_sge.num_sge = 1;
+ qp->r_sge.total_len = wqe->length;
+ break;
+
+ case IB_WR_RDMA_READ:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+ goto inv_err;
+ if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
+ wqe->wr.wr.rdma.remote_addr,
+ wqe->wr.wr.rdma.rkey,
+ IB_ACCESS_REMOTE_READ)))
+ goto acc_err;
+ release = 0;
+ sqp->s_sge.sg_list = NULL;
+ sqp->s_sge.num_sge = 1;
+ qp->r_sge.sge = wqe->sg_list[0];
+ qp->r_sge.sg_list = wqe->sg_list + 1;
+ qp->r_sge.num_sge = wqe->wr.num_sge;
+ qp->r_sge.total_len = wqe->length;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+ goto inv_err;
+ if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ wqe->wr.wr.atomic.remote_addr,
+ wqe->wr.wr.atomic.rkey,
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto acc_err;
+ /* Perform atomic OP and save result. */
+ maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+ sdata = wqe->wr.wr.atomic.compare_add;
+ *(u64 *) sqp->s_sge.sge.vaddr =
+ (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
+ (u64) atomic64_add_return(sdata, maddr) - sdata :
+ (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ sdata, wqe->wr.wr.atomic.swap);
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ qp->r_sge.num_sge = 0;
+ goto send_comp;
+
+ default:
+ send_status = IB_WC_LOC_QP_OP_ERR;
+ goto serr;
+ }
+
+ sge = &sqp->s_sge.sge;
+ while (sqp->s_len) {
+ u32 len = sqp->s_len;
+
+ if (len > sge->length)
+ len = sge->length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (!release)
+ atomic_dec(&sge->mr->refcount);
+ if (--sqp->s_sge.num_sge)
+ *sge = *sqp->s_sge.sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ sqp->s_len -= len;
+ }
+ if (release)
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+
+ if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ goto send_comp;
+
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ else
+ wc.opcode = IB_WC_RECV;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.byte_len = wqe->length;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = qp->remote_qpn;
+ wc.slid = qp->remote_ah_attr.dlid;
+ wc.sl = qp->remote_ah_attr.sl;
+ wc.port_num = 1;
+ /* Signal completion event if the solicited bit is set. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ wqe->wr.send_flags & IB_SEND_SOLICITED);
+
+send_comp:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ ibp->n_loop_pkts++;
+flush_send:
+ sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
+ qib_send_complete(sqp, wqe, send_status);
+ goto again;
+
+rnr_nak:
+ /* Handle RNR NAK */
+ if (qp->ibqp.qp_type == IB_QPT_UC)
+ goto send_comp;
+ ibp->n_rnr_naks++;
+ /*
+ * Note: we don't need the s_lock held since the BUSY flag
+ * makes this single threaded.
+ */
+ if (sqp->s_rnr_retry == 0) {
+ send_status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto serr;
+ }
+ if (sqp->s_rnr_retry_cnt < 7)
+ sqp->s_rnr_retry--;
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
+ goto clr_busy;
+ sqp->s_flags |= QIB_S_WAIT_RNR;
+ sqp->s_timer.function = qib_rc_rnr_retry;
+ sqp->s_timer.expires = jiffies +
+ usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
+ add_timer(&sqp->s_timer);
+ goto clr_busy;
+
+op_err:
+ send_status = IB_WC_REM_OP_ERR;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+inv_err:
+ send_status = IB_WC_REM_INV_REQ_ERR;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+acc_err:
+ send_status = IB_WC_REM_ACCESS_ERR;
+ wc.status = IB_WC_LOC_PROT_ERR;
+err:
+ /* responder goes to error state */
+ qib_rc_error(qp, wc.status);
+
+serr:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ qib_send_complete(sqp, wqe, send_status);
+ if (sqp->ibqp.qp_type == IB_QPT_RC) {
+ int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+
+ sqp->s_flags &= ~QIB_S_BUSY;
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+ if (lastwqe) {
+ struct ib_event ev;
+
+ ev.device = sqp->ibqp.device;
+ ev.element.qp = &sqp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
+ }
+ goto done;
+ }
+clr_busy:
+ sqp->s_flags &= ~QIB_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+done:
+ if (qp && atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+}
+
+/**
+ * qib_make_grh - construct a GRH header
+ * @ibp: a pointer to the IB port
+ * @hdr: a pointer to the GRH header being constructed
+ * @grh: the global route address to send to
+ * @hwords: the number of 32 bit words of header being sent
+ * @nwords: the number of 32 bit words of data being sent
+ *
+ * Return the size of the header in 32 bit words.
+ */
+u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
+ struct ib_global_route *grh, u32 hwords, u32 nwords)
+{
+ hdr->version_tclass_flow =
+ cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
+ (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
+ (grh->flow_label << IB_GRH_FLOW_SHIFT));
+ hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
+ /* next_hdr is defined by C8-7 in ch. 8.4.1 */
+ hdr->next_hdr = IB_GRH_NEXT_HDR;
+ hdr->hop_limit = grh->hop_limit;
+ /* The SGID is 32-bit aligned. */
+ hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
+ hdr->sgid.global.interface_id = grh->sgid_index ?
+ ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
+ hdr->dgid = grh->dgid;
+
+ /* GRH header size in 32-bit words. */
+ return sizeof(struct ib_grh) / sizeof(u32);
+}
+
+void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
+ u32 bth0, u32 bth2)
+{
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ u16 lrh0;
+ u32 nwords;
+ u32 extra_bytes;
+
+ /* Construct the header. */
+ extra_bytes = -qp->s_cur_size & 3;
+ nwords = (qp->s_cur_size + extra_bytes) >> 2;
+ lrh0 = QIB_LRH_BTH;
+ if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+ qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
+ &qp->remote_ah_attr.grh,
+ qp->s_hdrwords, nwords);
+ lrh0 = QIB_LRH_GRH;
+ }
+ lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
+ qp->remote_ah_attr.sl << 4;
+ qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
+ qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+ qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
+ qp->remote_ah_attr.src_path_bits);
+ bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
+ bth0 |= extra_bytes << 20;
+ if (qp->s_mig_state == IB_MIG_MIGRATED)
+ bth0 |= IB_BTH_MIG_REQ;
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
+ ohdr->bth[2] = cpu_to_be32(bth2);
+}
+
+/**
+ * qib_do_send - perform a send on a QP
+ * @work: contains a pointer to the QP
+ *
+ * Process entries in the send work queue until credit or queue is
+ * exhausted. Only allow one CPU to send a packet per QP (tasklet).
+ * Otherwise, two threads could send packets out of order.
+ */
+void qib_do_send(struct work_struct *work)
+{
+ struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ int (*make_req)(struct qib_qp *qp);
+ unsigned long flags;
+
+ if ((qp->ibqp.qp_type == IB_QPT_RC ||
+ qp->ibqp.qp_type == IB_QPT_UC) &&
+ (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
+ qib_ruc_loopback(qp);
+ return;
+ }
+
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ make_req = qib_make_rc_req;
+ else if (qp->ibqp.qp_type == IB_QPT_UC)
+ make_req = qib_make_uc_req;
+ else
+ make_req = qib_make_ud_req;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Return if we are already busy processing a work request. */
+ if (!qib_send_ok(qp)) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return;
+ }
+
+ qp->s_flags |= QIB_S_BUSY;
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ do {
+ /* Check for a constructed packet to be sent. */
+ if (qp->s_hdrwords != 0) {
+ /*
+ * If the packet cannot be sent now, return and
+ * the send tasklet will be woken up later.
+ */
+ if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
+ qp->s_cur_sge, qp->s_cur_size))
+ break;
+ /* Record that s_hdr is empty. */
+ qp->s_hdrwords = 0;
+ }
+ } while (make_req(qp));
+}
+
+/*
+ * This should be called with s_lock held.
+ */
+void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
+ enum ib_wc_status status)
+{
+ u32 old_last, last;
+ unsigned i;
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+ return;
+
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct qib_sge *sge = &wqe->sg_list[i];
+
+ atomic_dec(&sge->mr->refcount);
+ }
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
+
+ /* See ch. 11.2.4.1 and 10.7.3.1 */
+ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+ status != IB_WC_SUCCESS) {
+ struct ib_wc wc;
+
+ memset(&wc, 0, sizeof wc);
+ wc.wr_id = wqe->wr.wr_id;
+ wc.status = status;
+ wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
+ wc.qp = &qp->ibqp;
+ if (status == IB_WC_SUCCESS)
+ wc.byte_len = wqe->length;
+ qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
+ status != IB_WC_SUCCESS);
+ }
+
+ last = qp->s_last;
+ old_last = last;
+ if (++last >= qp->s_size)
+ last = 0;
+ qp->s_last = last;
+ if (qp->s_acked == old_last)
+ qp->s_acked = last;
+ if (qp->s_cur == old_last)
+ qp->s_cur = last;
+ if (qp->s_tail == old_last)
+ qp->s_tail = last;
+ if (qp->state == IB_QPS_SQD && last == qp->s_cur)
+ qp->s_draining = 0;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 2a68d9f624dd..0aeed0e74cb6 100644
--- a/drivers/infiniband/hw/ipath/ipath_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -32,22 +32,40 @@
*/
/*
* This file contains all of the code that is specific to the SerDes
- * on the InfiniPath 7220 chip.
+ * on the QLogic_IB 7220 chip.
*/
#include <linux/pci.h>
#include <linux/delay.h>
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-#include "ipath_7220.h"
+#include "qib.h"
+#include "qib_7220.h"
+
+/*
+ * Same as in qib_iba7220.c, but just the registers needed here.
+ * Could move whole set to qib_7220.h, but decided better to keep
+ * local.
+ */
+#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
+#define kr_hwerrclear KREG_IDX(HwErrClear)
+#define kr_hwerrmask KREG_IDX(HwErrMask)
+#define kr_hwerrstatus KREG_IDX(HwErrStatus)
+#define kr_ibcstatus KREG_IDX(IBCStatus)
+#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
+#define kr_scratch KREG_IDX(Scratch)
+#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
+/* these are used only here, not in qib_iba7220.c */
+#define kr_ibsd_epb_access_ctrl KREG_IDX(ibsd_epb_access_ctrl)
+#define kr_ibsd_epb_transaction_reg KREG_IDX(ibsd_epb_transaction_reg)
+#define kr_pciesd_epb_transaction_reg KREG_IDX(pciesd_epb_transaction_reg)
+#define kr_pciesd_epb_access_ctrl KREG_IDX(pciesd_epb_access_ctrl)
+#define kr_serdes_ddsrxeq0 KREG_IDX(SerDes_DDSRXEQ0)
/*
* The IBSerDesMappTable is a memory that holds values to be stored in
- * various SerDes registers by IBC. It is not part of the normal kregs
- * map and is used in exactly one place, hence the #define below.
+ * various SerDes registers by IBC.
*/
-#define KR_IBSerDesMappTable (0x94000 / (sizeof(uint64_t)))
+#define kr_serdes_maptable KREG_IDX(IBSerDesMappTable)
/*
* Below used for sdnum parameter, selecting one of the two sections
@@ -71,42 +89,37 @@
#define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8))
/* Forward declarations. */
-static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
- u32 data, u32 mask);
-static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
+static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
+ u32 data, u32 mask);
+static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
int mask);
-static int ipath_sd_trimdone_poll(struct ipath_devdata *dd);
-static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
- const char *where);
-static int ipath_sd_setvals(struct ipath_devdata *dd);
-static int ipath_sd_early(struct ipath_devdata *dd);
-static int ipath_sd_dactrim(struct ipath_devdata *dd);
-/* Set the registers that IBC may muck with to their default "preset" values */
-int ipath_sd7220_presets(struct ipath_devdata *dd);
-static int ipath_internal_presets(struct ipath_devdata *dd);
+static int qib_sd_trimdone_poll(struct qib_devdata *dd);
+static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where);
+static int qib_sd_setvals(struct qib_devdata *dd);
+static int qib_sd_early(struct qib_devdata *dd);
+static int qib_sd_dactrim(struct qib_devdata *dd);
+static int qib_internal_presets(struct qib_devdata *dd);
/* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */
-static int ipath_sd_trimself(struct ipath_devdata *dd, int val);
-static int epb_access(struct ipath_devdata *dd, int sdnum, int claim);
-
-void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup);
+static int qib_sd_trimself(struct qib_devdata *dd, int val);
+static int epb_access(struct qib_devdata *dd, int sdnum, int claim);
/*
* Below keeps track of whether the "once per power-on" initialization has
* been done, because uC code Version 1.32.17 or higher allows the uC to
* be reset at will, and Automatic Equalization may require it. So the
- * state of the reset "pin", as reflected in was_reset parameter to
- * ipath_sd7220_init() is no longer valid. Instead, we check for the
+ * state of the reset "pin", is no longer valid. Instead, we check for the
* actual uC code having been loaded.
*/
-static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd)
+static int qib_ibsd_ucode_loaded(struct qib_pportdata *ppd)
{
- if (!dd->serdes_first_init_done && (ipath_sd7220_ib_vfy(dd) > 0))
- dd->serdes_first_init_done = 1;
- return dd->serdes_first_init_done;
+ struct qib_devdata *dd = ppd->dd;
+ if (!dd->cspec->serdes_first_init_done && (qib_sd7220_ib_vfy(dd) > 0))
+ dd->cspec->serdes_first_init_done = 1;
+ return dd->cspec->serdes_first_init_done;
}
-/* repeat #define for local use. "Real" #define is in ipath_iba7220.c */
-#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
+/* repeat #define for local use. "Real" #define is in qib_iba7220.c */
+#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
#define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF))
#define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF))
#define UC_PAR_CLR_D 8
@@ -114,25 +127,25 @@ static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd)
#define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS)
#define START_EQ1(chan) EPB_LOC(chan, 7, 0x27)
-void ipath_sd7220_clr_ibpar(struct ipath_devdata *dd)
+void qib_sd7220_clr_ibpar(struct qib_devdata *dd)
{
int ret;
/* clear, then re-enable parity errs */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
UC_PAR_CLR_D, UC_PAR_CLR_M);
if (ret < 0) {
- ipath_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
+ qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
goto bail;
}
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
UC_PAR_CLR_M);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
udelay(4);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_write_kreg(dd, kr_hwerrclear,
+ QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
+ qib_read_kreg32(dd, kr_scratch);
bail:
return;
}
@@ -146,7 +159,7 @@ bail:
#define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS)
#define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS)
-static int ipath_resync_ibepb(struct ipath_devdata *dd)
+static int qib_resync_ibepb(struct qib_devdata *dd)
{
int ret, pat, tries, chn;
u32 loc;
@@ -155,43 +168,42 @@ static int ipath_resync_ibepb(struct ipath_devdata *dd)
chn = 0;
for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) {
loc = IB_PGUDP(chn);
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
if (ret < 0) {
- ipath_dev_err(dd, "Failed read in resync\n");
+ qib_dev_err(dd, "Failed read in resync\n");
continue;
}
if (ret != 0xF0 && ret != 0x55 && tries == 0)
- ipath_dev_err(dd, "unexpected pattern in resync\n");
+ qib_dev_err(dd, "unexpected pattern in resync\n");
pat = ret ^ 0xA5; /* alternate F0 and 55 */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
if (ret < 0) {
- ipath_dev_err(dd, "Failed write in resync\n");
+ qib_dev_err(dd, "Failed write in resync\n");
continue;
}
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
if (ret < 0) {
- ipath_dev_err(dd, "Failed re-read in resync\n");
+ qib_dev_err(dd, "Failed re-read in resync\n");
continue;
}
if (ret != pat) {
- ipath_dev_err(dd, "Failed compare1 in resync\n");
+ qib_dev_err(dd, "Failed compare1 in resync\n");
continue;
}
loc = IB_CMUDONE(chn);
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
if (ret < 0) {
- ipath_dev_err(dd, "Failed CMUDONE rd in resync\n");
+ qib_dev_err(dd, "Failed CMUDONE rd in resync\n");
continue;
}
if ((ret & 0x70) != ((chn << 4) | 0x40)) {
- ipath_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
- ret, chn);
+ qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
+ ret, chn);
continue;
}
if (++chn == 4)
break; /* Success */
}
- ipath_cdbg(VERBOSE, "Resync in %d tries\n", tries);
return (ret > 0) ? 0 : ret;
}
@@ -199,32 +211,32 @@ static int ipath_resync_ibepb(struct ipath_devdata *dd)
* Localize the stuff that should be done to change IB uC reset
* returns <0 for errors.
*/
-static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst)
+static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
{
u64 rst_val;
int ret = 0;
unsigned long flags;
- rst_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
+ rst_val = qib_read_kreg64(dd, kr_ibserdesctrl);
if (assert_rst) {
/*
* Vendor recommends "interrupting" uC before reset, to
* minimize possible glitches.
*/
- spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
+ spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
epb_access(dd, IB_7220_SERDES, 1);
rst_val |= 1ULL;
/* Squelch possible parity error from _asserting_ reset */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask &
- ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
+ qib_write_kreg(dd, kr_hwerrmask,
+ dd->cspec->hwerrmask &
+ ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
+ qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
/* flush write, delay to ensure it took effect */
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
udelay(2);
/* once it's reset, can remove interrupt */
epb_access(dd, IB_7220_SERDES, -1);
- spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
+ spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
} else {
/*
* Before we de-assert reset, we need to deal with
@@ -235,46 +247,46 @@ static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst)
*/
u64 val;
rst_val &= ~(1ULL);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask &
- ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
+ qib_write_kreg(dd, kr_hwerrmask,
+ dd->cspec->hwerrmask &
+ ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
- ret = ipath_resync_ibepb(dd);
+ ret = qib_resync_ibepb(dd);
if (ret < 0)
- ipath_dev_err(dd, "unable to re-sync IB EPB\n");
+ qib_dev_err(dd, "unable to re-sync IB EPB\n");
/* set uC control regs to suppress parity errs */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
if (ret < 0)
goto bail;
/* IB uC code past Version 1.32.17 allow suppression of wdog */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
0x80);
if (ret < 0) {
- ipath_dev_err(dd, "Failed to set WDOG disable\n");
+ qib_dev_err(dd, "Failed to set WDOG disable\n");
goto bail;
}
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
+ qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
/* flush write, delay for startup */
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
udelay(1);
/* clear, then re-enable parity errs */
- ipath_sd7220_clr_ibpar(dd);
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
- if (val & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR) {
- ipath_dev_err(dd, "IBUC Parity still set after RST\n");
- dd->ipath_hwerrmask &=
- ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR;
+ qib_sd7220_clr_ibpar(dd);
+ val = qib_read_kreg64(dd, kr_hwerrstatus);
+ if (val & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) {
+ qib_dev_err(dd, "IBUC Parity still set after RST\n");
+ dd->cspec->hwerrmask &=
+ ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
}
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
+ qib_write_kreg(dd, kr_hwerrmask,
+ dd->cspec->hwerrmask);
}
bail:
return ret;
}
-static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
+static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
const char *where)
{
int ret, chn, baduns;
@@ -286,69 +298,71 @@ static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
/* give time for reset to settle out in EPB */
udelay(2);
- ret = ipath_resync_ibepb(dd);
+ ret = qib_resync_ibepb(dd);
if (ret < 0)
- ipath_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
+ qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
/* Do "sacrificial read" to get EPB in sane state after reset */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
if (ret < 0)
- ipath_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
+ qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
/* Check/show "summary" Trim-done bit in IBCStatus */
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
- if (val & (1ULL << 11))
- ipath_cdbg(VERBOSE, "IBCS TRIMDONE set (%s)\n", where);
- else
- ipath_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
-
+ val = qib_read_kreg64(dd, kr_ibcstatus);
+ if (!(val & (1ULL << 11)))
+ qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
+ /*
+ * Do "dummy read/mod/wr" to get EPB in sane state after reset
+ * The default value for MPREG6 is 0.
+ */
udelay(2);
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
if (ret < 0)
- ipath_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
+ qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
udelay(10);
baduns = 0;
for (chn = 3; chn >= 0; --chn) {
/* Read CTRL reg for each channel to check TRIMDONE */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0, 0);
if (ret < 0)
- ipath_dev_err(dd, "Failed checking TRIMDONE, chn %d"
- " (%s)\n", chn, where);
+ qib_dev_err(dd, "Failed checking TRIMDONE, chn %d"
+ " (%s)\n", chn, where);
if (!(ret & 0x10)) {
int probe;
+
baduns |= (1 << chn);
- ipath_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)."
+ qib_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)."
" (%s)\n", chn, ret, where);
- probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+ probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_PGUDP(0), 0, 0);
- ipath_dev_err(dd, "probe is %d (%02X)\n",
+ qib_dev_err(dd, "probe is %d (%02X)\n",
probe, probe);
- probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+ probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0, 0);
- ipath_dev_err(dd, "re-read: %d (%02X)\n",
+ qib_dev_err(dd, "re-read: %d (%02X)\n",
probe, probe);
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0x10, 0x10);
if (ret < 0)
- ipath_dev_err(dd,
+ qib_dev_err(dd,
"Err on TRIMDONE rewrite1\n");
}
}
for (chn = 3; chn >= 0; --chn) {
/* Read CTRL reg for each channel to check TRIMDONE */
if (baduns & (1 << chn)) {
- ipath_dev_err(dd,
+ qib_dev_err(dd,
"Reseting TRIMDONE on chn %d (%s)\n",
chn, where);
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0x10, 0x10);
if (ret < 0)
- ipath_dev_err(dd, "Failed re-setting "
+ qib_dev_err(dd, "Failed re-setting "
"TRIMDONE, chn %d (%s)\n",
chn, where);
}
@@ -361,96 +375,86 @@ static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
* Post IB uC code version 1.32.17, was_reset being 1 is not really
* informative, so we double-check.
*/
-int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset)
+int qib_sd7220_init(struct qib_devdata *dd)
{
int ret = 1; /* default to failure */
- int first_reset;
- int val_stat;
+ int first_reset, was_reset;
+ /* SERDES MPU reset recorded in D0 */
+ was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1);
if (!was_reset) {
/* entered with reset not asserted, we need to do it */
- ipath_ibsd_reset(dd, 1);
- ipath_sd_trimdone_monitor(dd, "Driver-reload");
+ qib_ibsd_reset(dd, 1);
+ qib_sd_trimdone_monitor(dd, "Driver-reload");
}
-
/* Substitute our deduced value for was_reset */
- ret = ipath_ibsd_ucode_loaded(dd);
- if (ret < 0) {
- ret = 1;
- goto done;
- }
- first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
+ ret = qib_ibsd_ucode_loaded(dd->pport);
+ if (ret < 0)
+ goto bail;
+ first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
/*
* Alter some regs per vendor latest doc, reset-defaults
* are not right for IB.
*/
- ret = ipath_sd_early(dd);
+ ret = qib_sd_early(dd);
if (ret < 0) {
- ipath_dev_err(dd, "Failed to set IB SERDES early defaults\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "Failed to set IB SERDES early defaults\n");
+ goto bail;
}
-
/*
* Set DAC manual trim IB.
* We only do this once after chip has been reset (usually
* same as once per system boot).
*/
if (first_reset) {
- ret = ipath_sd_dactrim(dd);
+ ret = qib_sd_dactrim(dd);
if (ret < 0) {
- ipath_dev_err(dd, "Failed IB SERDES DAC trim\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "Failed IB SERDES DAC trim\n");
+ goto bail;
}
}
-
/*
* Set various registers (DDS and RXEQ) that will be
* controlled by IBC (in 1.2 mode) to reasonable preset values
* Calling the "internal" version avoids the "check for needed"
* and "trimdone monitor" that might be counter-productive.
*/
- ret = ipath_internal_presets(dd);
+ ret = qib_internal_presets(dd);
if (ret < 0) {
- ipath_dev_err(dd, "Failed to set IB SERDES presets\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "Failed to set IB SERDES presets\n");
+ goto bail;
}
- ret = ipath_sd_trimself(dd, 0x80);
+ ret = qib_sd_trimself(dd, 0x80);
if (ret < 0) {
- ipath_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
+ goto bail;
}
/* Load image, then try to verify */
- ret = 0; /* Assume success */
+ ret = 0; /* Assume success */
if (first_reset) {
int vfy;
int trim_done;
- ipath_dbg("SerDes uC was reset, reloading PRAM\n");
- ret = ipath_sd7220_ib_load(dd);
+
+ ret = qib_sd7220_ib_load(dd);
if (ret < 0) {
- ipath_dev_err(dd, "Failed to load IB SERDES image\n");
- ret = 1;
- goto done;
- }
+ qib_dev_err(dd, "Failed to load IB SERDES image\n");
+ goto bail;
+ } else {
+ /* Loaded image, try to verify */
+ vfy = qib_sd7220_ib_vfy(dd);
+ if (vfy != ret) {
+ qib_dev_err(dd, "SERDES PRAM VFY failed\n");
+ goto bail;
+ } /* end if verified */
+ } /* end if loaded */
- /* Loaded image, try to verify */
- vfy = ipath_sd7220_ib_vfy(dd);
- if (vfy != ret) {
- ipath_dev_err(dd, "SERDES PRAM VFY failed\n");
- ret = 1;
- goto done;
- }
/*
* Loaded and verified. Almost good...
* hold "success" in ret
*/
ret = 0;
-
/*
* Prev steps all worked, continue bringup
* De-assert RESET to uC, only in first reset, to allow
@@ -461,45 +465,47 @@ int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset)
*/
ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
if (ret < 0) {
- ipath_dev_err(dd, "Failed clearing START_EQ1\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "Failed clearing START_EQ1\n");
+ goto bail;
}
- ipath_ibsd_reset(dd, 0);
+ qib_ibsd_reset(dd, 0);
/*
* If this is not the first reset, trimdone should be set
- * already.
+ * already. We may need to check about this.
*/
- trim_done = ipath_sd_trimdone_poll(dd);
+ trim_done = qib_sd_trimdone_poll(dd);
/*
* Whether or not trimdone succeeded, we need to put the
* uC back into reset to avoid a possible fight with the
* IBC state-machine.
*/
- ipath_ibsd_reset(dd, 1);
+ qib_ibsd_reset(dd, 1);
if (!trim_done) {
- ipath_dev_err(dd, "No TRIMDONE seen\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "No TRIMDONE seen\n");
+ goto bail;
}
-
- ipath_sd_trimdone_monitor(dd, "First-reset");
+ /*
+ * DEBUG: check each time we reset if trimdone bits have
+ * gotten cleared, and re-set them.
+ */
+ qib_sd_trimdone_monitor(dd, "First-reset");
/* Remember so we do not re-do the load, dactrim, etc. */
- dd->serdes_first_init_done = 1;
+ dd->cspec->serdes_first_init_done = 1;
}
/*
- * Setup for channel training and load values for
+ * setup for channel training and load values for
* RxEq and DDS in tables used by IBC in IB1.2 mode
*/
-
- val_stat = ipath_sd_setvals(dd);
- if (val_stat < 0)
- ret = 1;
+ ret = 0;
+ if (qib_sd_setvals(dd) >= 0)
+ goto done;
+bail:
+ ret = 1;
done:
/* start relock timer regardless, but start at 1 second */
- ipath_set_relock_poll(dd, -1);
+ set_7220_relock_poll(dd, -1);
return ret;
}
@@ -517,7 +523,7 @@ done:
* the "claim" parameter is >0 to claim, <0 to release, 0 to query.
* Returns <0 for errors, >0 if we had ownership, else 0.
*/
-static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
+static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
{
u16 acc;
u64 accval;
@@ -525,28 +531,30 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
u64 oct_sel = 0;
switch (sdnum) {
- case IB_7220_SERDES :
+ case IB_7220_SERDES:
/*
* The IB SERDES "ownership" is fairly simple. A single each
* request/grant.
*/
- acc = dd->ipath_kregs->kr_ib_epbacc;
+ acc = kr_ibsd_epb_access_ctrl;
break;
- case PCIE_SERDES0 :
- case PCIE_SERDES1 :
+
+ case PCIE_SERDES0:
+ case PCIE_SERDES1:
/* PCIe SERDES has two "octants", need to select which */
- acc = dd->ipath_kregs->kr_pcie_epbacc;
+ acc = kr_pciesd_epb_access_ctrl;
oct_sel = (2 << (sdnum - PCIE_SERDES0));
break;
- default :
+
+ default:
return 0;
}
/* Make sure any outstanding transaction was seen */
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
udelay(15);
- accval = ipath_read_kreg32(dd, acc);
+ accval = qib_read_kreg32(dd, acc);
owned = !!(accval & EPB_ACC_GNT);
if (claim < 0) {
@@ -557,22 +565,22 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
* Both should be clear
*/
u64 newval = 0;
- ipath_write_kreg(dd, acc, newval);
+ qib_write_kreg(dd, acc, newval);
/* First read after write is not trustworthy */
- pollval = ipath_read_kreg32(dd, acc);
+ pollval = qib_read_kreg32(dd, acc);
udelay(5);
- pollval = ipath_read_kreg32(dd, acc);
+ pollval = qib_read_kreg32(dd, acc);
if (pollval & EPB_ACC_GNT)
owned = -1;
} else if (claim > 0) {
/* Need to claim */
u64 pollval;
u64 newval = EPB_ACC_REQ | oct_sel;
- ipath_write_kreg(dd, acc, newval);
+ qib_write_kreg(dd, acc, newval);
/* First read after write is not trustworthy */
- pollval = ipath_read_kreg32(dd, acc);
+ pollval = qib_read_kreg32(dd, acc);
udelay(5);
- pollval = ipath_read_kreg32(dd, acc);
+ pollval = qib_read_kreg32(dd, acc);
if (!(pollval & EPB_ACC_GNT))
owned = -1;
}
@@ -582,18 +590,17 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
/*
* Lemma to deal with race condition of write..read to epb regs
*/
-static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
+static int epb_trans(struct qib_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
{
int tries;
u64 transval;
-
- ipath_write_kreg(dd, reg, i_val);
+ qib_write_kreg(dd, reg, i_val);
/* Throw away first read, as RDY bit may be stale */
- transval = ipath_read_kreg64(dd, reg);
+ transval = qib_read_kreg64(dd, reg);
for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = ipath_read_kreg32(dd, reg);
+ transval = qib_read_kreg32(dd, reg);
if (transval & EPB_TRANS_RDY)
break;
udelay(5);
@@ -606,21 +613,20 @@ static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
}
/**
- *
- * ipath_sd7220_reg_mod - modify SERDES register
- * @dd: the infinipath device
+ * qib_sd7220_reg_mod - modify SERDES register
+ * @dd: the qlogic_ib device
* @sdnum: which SERDES to access
* @loc: location - channel, element, register, as packed by EPB_LOC() macro.
* @wd: Write Data - value to set in register
* @mask: ones where data should be spliced into reg.
*
- * Basic register read/modify/write, with un-needed accesses elided. That is,
+ * Basic register read/modify/write, with un-needed accesses elided. That is,
* a mask of zero will prevent write, while a mask of 0xFF will prevent read.
* returns current (presumed, if a write was done) contents of selected
* register, or <0 if errors.
*/
-static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
- u32 wd, u32 mask)
+static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
+ u32 wd, u32 mask)
{
u16 trans;
u64 transval;
@@ -629,14 +635,16 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
unsigned long flags;
switch (sdnum) {
- case IB_7220_SERDES :
- trans = dd->ipath_kregs->kr_ib_epbtrans;
+ case IB_7220_SERDES:
+ trans = kr_ibsd_epb_transaction_reg;
break;
- case PCIE_SERDES0 :
- case PCIE_SERDES1 :
- trans = dd->ipath_kregs->kr_pcie_epbtrans;
+
+ case PCIE_SERDES0:
+ case PCIE_SERDES1:
+ trans = kr_pciesd_epb_transaction_reg;
break;
- default :
+
+ default:
return -1;
}
@@ -644,23 +652,23 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
* All access is locked in software (vs other host threads) and
* hardware (vs uC access).
*/
- spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
+ spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
owned = epb_access(dd, sdnum, 1);
if (owned < 0) {
- spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
+ spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
return -1;
}
ret = 0;
for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = ipath_read_kreg32(dd, trans);
+ transval = qib_read_kreg32(dd, trans);
if (transval & EPB_TRANS_RDY)
break;
udelay(5);
}
if (tries > 0) {
- tries = 1; /* to make read-skip work */
+ tries = 1; /* to make read-skip work */
if (mask != 0xFF) {
/*
* Not a pure write, so need to read.
@@ -688,7 +696,7 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
else
ret = transval & EPB_DATA_MASK;
- spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
+ spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
if (tries <= 0)
ret = -1;
return ret;
@@ -707,7 +715,7 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
#define EPB_RAMDATA EPB_LOC(6, 0, 5)
/* Transfer date to/from uC Program RAM of IB or PCIe SerDes */
-static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
+static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
u8 *buf, int cnt, int rd_notwr)
{
u16 trans;
@@ -723,29 +731,28 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
/* Pick appropriate transaction reg and "Chip select" for this serdes */
switch (sdnum) {
- case IB_7220_SERDES :
+ case IB_7220_SERDES:
csbit = 1ULL << EPB_IB_UC_CS_SHF;
- trans = dd->ipath_kregs->kr_ib_epbtrans;
+ trans = kr_ibsd_epb_transaction_reg;
break;
- case PCIE_SERDES0 :
- case PCIE_SERDES1 :
+
+ case PCIE_SERDES0:
+ case PCIE_SERDES1:
/* PCIe SERDES has uC "chip select" in different bit, too */
csbit = 1ULL << EPB_PCIE_UC_CS_SHF;
- trans = dd->ipath_kregs->kr_pcie_epbtrans;
+ trans = kr_pciesd_epb_transaction_reg;
break;
- default :
+
+ default:
return -1;
}
op = rd_notwr ? "Rd" : "Wr";
- spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
+ spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
owned = epb_access(dd, sdnum, 1);
if (owned < 0) {
- spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
- ipath_dbg("Could not get %s access to %s EPB: %X, loc %X\n",
- op, (sdnum == IB_7220_SERDES) ? "IB" : "PCIe",
- owned, loc);
+ spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
return -1;
}
@@ -758,16 +765,14 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
*/
addr = loc & 0x1FFF;
for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = ipath_read_kreg32(dd, trans);
+ transval = qib_read_kreg32(dd, trans);
if (transval & EPB_TRANS_RDY)
break;
udelay(5);
}
sofar = 0;
- if (tries <= 0)
- ipath_dbg("No initial RDY on EPB access request\n");
- else {
+ if (tries > 0) {
/*
* Every "memory" access is doubly-indirect.
* We set two bytes of address, then read/write
@@ -778,8 +783,6 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
transval = csbit | EPB_UC_CTL |
(rd_notwr ? EPB_ROM_R : EPB_ROM_W);
tries = epb_trans(dd, trans, transval, &transval);
- if (tries <= 0)
- ipath_dbg("No EPB response to uC %s cmd\n", op);
while (tries > 0 && sofar < cnt) {
if (!sofar) {
/* Only set address at start of chunk */
@@ -787,18 +790,14 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
transval = csbit | EPB_MADDRH | addrbyte;
tries = epb_trans(dd, trans, transval,
&transval);
- if (tries <= 0) {
- ipath_dbg("No EPB response ADDRH\n");
+ if (tries <= 0)
break;
- }
addrbyte = (addr + sofar) & 0xFF;
transval = csbit | EPB_MADDRL | addrbyte;
tries = epb_trans(dd, trans, transval,
&transval);
- if (tries <= 0) {
- ipath_dbg("No EPB response ADDRL\n");
+ if (tries <= 0)
break;
- }
}
if (rd_notwr)
@@ -806,10 +805,8 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
else
transval = csbit | EPB_ROMDATA | buf[sofar];
tries = epb_trans(dd, trans, transval, &transval);
- if (tries <= 0) {
- ipath_dbg("No EPB response DATA\n");
+ if (tries <= 0)
break;
- }
if (rd_notwr)
buf[sofar] = transval & EPB_DATA_MASK;
++sofar;
@@ -817,8 +814,6 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
/* Finally, clear control-bit for Read or Write */
transval = csbit | EPB_UC_CTL;
tries = epb_trans(dd, trans, transval, &transval);
- if (tries <= 0)
- ipath_dbg("No EPB response to drop of uC %s cmd\n", op);
}
ret = sofar;
@@ -826,18 +821,16 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
if (epb_access(dd, sdnum, -1) < 0)
ret = -1;
- spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
- if (tries <= 0) {
- ipath_dbg("SERDES PRAM %s failed after %d bytes\n", op, sofar);
+ spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
+ if (tries <= 0)
ret = -1;
- }
return ret;
}
#define PROG_CHUNK 64
-int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
- u8 *img, int len, int offset)
+int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum,
+ u8 *img, int len, int offset)
{
int cnt, sofar, req;
@@ -846,7 +839,7 @@ int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
req = len - sofar;
if (req > PROG_CHUNK)
req = PROG_CHUNK;
- cnt = ipath_sd7220_ram_xfer(dd, sdnum, offset + sofar,
+ cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar,
img + sofar, req, 0);
if (cnt < req) {
sofar = -1;
@@ -860,8 +853,8 @@ int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
#define VFY_CHUNK 64
#define SD_PRAM_ERROR_LIMIT 42
-int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
- const u8 *img, int len, int offset)
+int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum,
+ const u8 *img, int len, int offset)
{
int cnt, sofar, req, idx, errors;
unsigned char readback[VFY_CHUNK];
@@ -872,7 +865,7 @@ int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
req = len - sofar;
if (req > VFY_CHUNK)
req = VFY_CHUNK;
- cnt = ipath_sd7220_ram_xfer(dd, sdnum, sofar + offset,
+ cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset,
readback, req, 1);
if (cnt < req) {
/* failed in read itself */
@@ -888,11 +881,13 @@ int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
return errors ? -errors : sofar;
}
-/* IRQ not set up at this point in init, so we poll. */
+/*
+ * IRQ not set up at this point in init, so we poll.
+ */
#define IB_SERDES_TRIM_DONE (1ULL << 11)
#define TRIM_TMO (30)
-static int ipath_sd_trimdone_poll(struct ipath_devdata *dd)
+static int qib_sd_trimdone_poll(struct qib_devdata *dd)
{
int trim_tmo, ret;
uint64_t val;
@@ -903,16 +898,15 @@ static int ipath_sd_trimdone_poll(struct ipath_devdata *dd)
*/
ret = 0;
for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) {
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
+ val = qib_read_kreg64(dd, kr_ibcstatus);
if (val & IB_SERDES_TRIM_DONE) {
- ipath_cdbg(VERBOSE, "TRIMDONE after %d\n", trim_tmo);
ret = 1;
break;
}
msleep(10);
}
if (trim_tmo >= TRIM_TMO) {
- ipath_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
+ qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
ret = 0;
}
return ret;
@@ -964,8 +958,7 @@ static struct dds_init {
};
/*
- * Next, values related to Receive Equalization.
- * In comments, FDR (Full) is IB DDR, HDR (Half) is IB SDR
+ * Now the RXEQ section of the table.
*/
/* Hardware packs an element number and register address thus: */
#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))
@@ -981,23 +974,23 @@ static struct dds_init {
#define RXEQ_SDR_ZCNT 23
static struct rxeq_init {
- u16 rdesc; /* in form used in SerDesDDSRXEQ */
+ u16 rdesc; /* in form used in SerDesDDSRXEQ */
u8 rdata[4];
} rxeq_init_vals[] = {
/* Set Rcv Eq. to Preset node */
RXEQ_VAL_ALL(7, 0x27, 0x10),
/* Set DFELTHFDR/HDR thresholds */
- RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR */
+ RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR, was 0, 1, 2, 3 */
RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */
- /* Set TLTHFDR/HDR threshold */
- RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR */
- RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR */
+ /* Set TLTHFDR/HDR threshold */
+ RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR, was 0, 2, 4, 6 */
+ RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR, was 0, 1, 2, 3 */
/* Set Preamp setting 2 (ZFR/ZCNT) */
- RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR */
- RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR */
+ RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR, was 12, 16, 20, 24 */
+ RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR, was 12, 16, 20, 24 */
/* Set Preamp DC gain and Setting 1 (GFR/GHR) */
- RXEQ_VAL(7, 0x1E, 0x10, 0x10, 0x10, 0x10), /* FDR */
- RXEQ_VAL(7, 0x1F, 0x10, 0x10, 0x10, 0x10), /* HDR */
+ RXEQ_VAL(7, 0x1E, 16, 16, 16, 16), /* FDR, was 16, 17, 18, 20 */
+ RXEQ_VAL(7, 0x1F, 16, 16, 16, 16), /* HDR, was 16, 17, 18, 20 */
/* Toggle RELOCK (in VCDL_CTRL0) to lock to data */
RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */
RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */
@@ -1007,27 +1000,27 @@ static struct rxeq_init {
#define DDS_ROWS (16)
#define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals)
-static int ipath_sd_setvals(struct ipath_devdata *dd)
+static int qib_sd_setvals(struct qib_devdata *dd)
{
int idx, midx;
- int min_idx; /* Minimum index for this portion of table */
+ int min_idx; /* Minimum index for this portion of table */
uint32_t dds_reg_map;
u64 __iomem *taddr, *iaddr;
uint64_t data;
uint64_t sdctl;
- taddr = dd->ipath_kregbase + KR_IBSerDesMappTable;
- iaddr = dd->ipath_kregbase + dd->ipath_kregs->kr_ib_ddsrxeq;
+ taddr = dd->kregbase + kr_serdes_maptable;
+ iaddr = dd->kregbase + kr_serdes_ddsrxeq0;
/*
* Init the DDS section of the table.
* Each "row" of the table provokes NUM_DDS_REG writes, to the
* registers indicated in DDS_REG_MAP.
*/
- sdctl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
+ sdctl = qib_read_kreg64(dd, kr_ibserdesctrl);
sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8);
sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, sdctl);
+ qib_write_kreg(dd, kr_ibserdesctrl, sdctl);
/*
* Iterate down table within loop for each register to store.
@@ -1037,21 +1030,21 @@ static int ipath_sd_setvals(struct ipath_devdata *dd)
data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
writeq(data, iaddr + idx);
mmiowb();
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
dds_reg_map >>= 4;
for (midx = 0; midx < DDS_ROWS; ++midx) {
u64 __iomem *daddr = taddr + ((midx << 4) + idx);
data = dds_init_vals[midx].reg_vals[idx];
writeq(data, daddr);
mmiowb();
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
} /* End inner for (vals for this reg, each row) */
} /* end outer for (regs to be stored) */
/*
- * Init the RXEQ section of the table. As explained above the table
- * rxeq_init_vals[], this runs in a different order, as the pattern
- * of register references is more complex, but there are only
+ * Init the RXEQ section of the table.
+ * This runs in a different order, as the pattern of
+ * register references is more complex, but there are only
* four "data" values per register.
*/
min_idx = idx; /* RXEQ indices pick up where DDS left off */
@@ -1066,13 +1059,13 @@ static int ipath_sd_setvals(struct ipath_devdata *dd)
/* Store the next RXEQ register address */
writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
mmiowb();
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
/* Iterate through RXEQ values */
for (vidx = 0; vidx < 4; vidx++) {
data = rxeq_init_vals[idx].rdata[vidx];
writeq(data, taddr + (vidx << 6) + idx);
mmiowb();
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
}
} /* end outer for (Reg-writes for RXEQ) */
return 0;
@@ -1085,33 +1078,18 @@ static int ipath_sd_setvals(struct ipath_devdata *dd)
#define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8)
#define START_EQ2(chan) EPB_LOC(chan, 7, 0x28)
-static int ibsd_sto_noisy(struct ipath_devdata *dd, int loc, int val, int mask)
-{
- int ret = -1;
- int sloc; /* shifted loc, for messages */
-
- loc |= (1U << EPB_IB_QUAD0_CS_SHF);
- sloc = loc >> EPB_ADDR_SHF;
-
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, mask);
- if (ret < 0)
- ipath_dev_err(dd, "Write failed: elt %d,"
- " addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n",
- (sloc & 0xF), (sloc >> 9) & 0x3f, (sloc >> 4) & 7,
- val & 0xFF, mask & 0xFF);
- return ret;
-}
-
/*
* Repeat a "store" across all channels of the IB SerDes.
* Although nominally it inherits the "read value" of the last
* channel it modified, the only really useful return is <0 for
* failure, >= 0 for success. The parameter 'loc' is assumed to
- * be the location for the channel-0 copy of the register to
- * be modified.
+ * be the location in some channel of the register to be modified
+ * The caller can specify use of the "gang write" option of EPB,
+ * in which case we use the specified channel data for any fields
+ * not explicitly written.
*/
-static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
- int mask)
+static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
+ int mask)
{
int ret = -1;
int chnl;
@@ -1126,24 +1104,27 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
loc |= (1U << EPB_IB_QUAD0_CS_SHF);
chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7;
if (mask != 0xFF) {
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
- loc & ~EPB_GLOBAL_WR, 0, 0);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
+ loc & ~EPB_GLOBAL_WR, 0, 0);
if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF;
- ipath_dev_err(dd, "pre-read failed: elt %d,"
- " addr 0x%X, chnl %d\n", (sloc & 0xF),
- (sloc >> 9) & 0x3f, chnl);
+
+ qib_dev_err(dd, "pre-read failed: elt %d,"
+ " addr 0x%X, chnl %d\n",
+ (sloc & 0xF),
+ (sloc >> 9) & 0x3f, chnl);
return ret;
}
val = (ret & ~mask) | (val & mask);
}
loc &= ~(7 << (4+EPB_ADDR_SHF));
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF;
- ipath_dev_err(dd, "Global WR failed: elt %d,"
- " addr 0x%X, val %02X\n",
- (sloc & 0xF), (sloc >> 9) & 0x3f, val);
+
+ qib_dev_err(dd, "Global WR failed: elt %d,"
+ " addr 0x%X, val %02X\n",
+ (sloc & 0xF), (sloc >> 9) & 0x3f, val);
}
return ret;
}
@@ -1151,16 +1132,17 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
loc &= ~(7 << (4+EPB_ADDR_SHF));
loc |= (1U << EPB_IB_QUAD0_CS_SHF);
for (chnl = 0; chnl < 4; ++chnl) {
- int cloc;
- cloc = loc | (chnl << (4+EPB_ADDR_SHF));
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
+ int cloc = loc | (chnl << (4+EPB_ADDR_SHF));
+
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF;
- ipath_dev_err(dd, "Write failed: elt %d,"
- " addr 0x%X, chnl %d, val 0x%02X,"
- " mask 0x%02X\n",
- (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
- val & 0xFF, mask & 0xFF);
+
+ qib_dev_err(dd, "Write failed: elt %d,"
+ " addr 0x%X, chnl %d, val 0x%02X,"
+ " mask 0x%02X\n",
+ (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
+ val & 0xFF, mask & 0xFF);
break;
}
}
@@ -1171,7 +1153,7 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
* Set the Tx values normally modified by IBC in IB1.2 mode to default
* values, as gotten from first row of init table.
*/
-static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi)
+static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi)
{
int ret;
int idx, reg, data;
@@ -1194,7 +1176,7 @@ static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi)
* Set the Rx values normally modified by IBC in IB1.2 mode to default
* values, as gotten from selected column of init table.
*/
-static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
+static int set_rxeq_vals(struct qib_devdata *dd, int vsel)
{
int ret;
int ridx;
@@ -1202,6 +1184,7 @@ static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
for (ridx = 0; ridx < cnt; ++ridx) {
int elt, reg, val, loc;
+
elt = rxeq_init_vals[ridx].rdesc & 0xF;
reg = rxeq_init_vals[ridx].rdesc >> 4;
loc = EPB_LOC(0, elt, reg);
@@ -1217,83 +1200,66 @@ static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
/*
* Set the default values (row 0) for DDR Driver Demphasis.
* we do this initially and whenever we turn off IB-1.2
+ *
* The "default" values for Rx equalization are also stored to
* SerDes registers. Formerly (and still default), we used set 2.
* For experimenting with cables and link-partners, we allow changing
* that via a module parameter.
*/
-static unsigned ipath_rxeq_set = 2;
-module_param_named(rxeq_default_set, ipath_rxeq_set, uint,
- S_IWUSR | S_IRUGO);
+static unsigned qib_rxeq_set = 2;
+module_param_named(rxeq_default_set, qib_rxeq_set, uint,
+ S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(rxeq_default_set,
- "Which set [0..3] of Rx Equalization values is default");
+ "Which set [0..3] of Rx Equalization values is default");
-static int ipath_internal_presets(struct ipath_devdata *dd)
+static int qib_internal_presets(struct qib_devdata *dd)
{
int ret = 0;
ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
if (ret < 0)
- ipath_dev_err(dd, "Failed to set default DDS values\n");
- ret = set_rxeq_vals(dd, ipath_rxeq_set & 3);
+ qib_dev_err(dd, "Failed to set default DDS values\n");
+ ret = set_rxeq_vals(dd, qib_rxeq_set & 3);
if (ret < 0)
- ipath_dev_err(dd, "Failed to set default RXEQ values\n");
+ qib_dev_err(dd, "Failed to set default RXEQ values\n");
return ret;
}
-int ipath_sd7220_presets(struct ipath_devdata *dd)
+int qib_sd7220_presets(struct qib_devdata *dd)
{
int ret = 0;
- if (!dd->ipath_presets_needed)
+ if (!dd->cspec->presets_needed)
return ret;
- dd->ipath_presets_needed = 0;
+ dd->cspec->presets_needed = 0;
/* Assert uC reset, so we don't clash with it. */
- ipath_ibsd_reset(dd, 1);
+ qib_ibsd_reset(dd, 1);
udelay(2);
- ipath_sd_trimdone_monitor(dd, "link-down");
+ qib_sd_trimdone_monitor(dd, "link-down");
- ret = ipath_internal_presets(dd);
-return ret;
+ ret = qib_internal_presets(dd);
+ return ret;
}
-static int ipath_sd_trimself(struct ipath_devdata *dd, int val)
+static int qib_sd_trimself(struct qib_devdata *dd, int val)
{
- return ibsd_sto_noisy(dd, CMUCTRL5, val, 0xFF);
+ int loc = CMUCTRL5 | (1U << EPB_IB_QUAD0_CS_SHF);
+
+ return qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
}
-static int ipath_sd_early(struct ipath_devdata *dd)
+static int qib_sd_early(struct qib_devdata *dd)
{
- int ret = -1; /* Default failed */
- int chnl;
+ int ret;
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, RXHSCTRL0(chnl), 0xD4, 0xFF);
- if (ret < 0)
- goto bail;
- }
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, VCDL_DAC2(chnl), 0x2D, 0xFF);
- if (ret < 0)
- goto bail;
- }
- /* more fine-tuning of what will be default */
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, VCDL_CTRL2(chnl), 3, 0xF);
- if (ret < 0)
- goto bail;
- }
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, START_EQ1(chnl), 0x10, 0xFF);
- if (ret < 0)
- goto bail;
- }
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, START_EQ2(chnl), 0x30, 0xFF);
- if (ret < 0)
- goto bail;
- }
+ ret = ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF);
+ if (ret < 0)
+ goto bail;
+ ret = ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF);
+ if (ret < 0)
+ goto bail;
+ ret = ibsd_mod_allchnls(dd, START_EQ2(0) | EPB_GLOBAL_WR, 0x30, 0xFF);
bail:
return ret;
}
@@ -1302,50 +1268,53 @@ bail:
#define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6)
#define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF)
-static int ipath_sd_dactrim(struct ipath_devdata *dd)
+static int qib_sd_dactrim(struct qib_devdata *dd)
{
- int ret = -1; /* Default failed */
- int chnl;
+ int ret;
+
+ ret = ibsd_mod_allchnls(dd, VCDL_DAC2(0) | EPB_GLOBAL_WR, 0x2D, 0xFF);
+ if (ret < 0)
+ goto bail;
+
+ /* more fine-tuning of what will be default */
+ ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF);
+ if (ret < 0)
+ goto bail;
+
+ ret = ibsd_mod_allchnls(dd, BACTRL(0) | EPB_GLOBAL_WR, 0x40, 0xFF);
+ if (ret < 0)
+ goto bail;
+
+ ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
+ if (ret < 0)
+ goto bail;
+
+ ret = ibsd_mod_allchnls(dd, RXHSSTATUS(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
+ if (ret < 0)
+ goto bail;
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, BACTRL(chnl), 0x40, 0xFF);
- if (ret < 0)
- goto bail;
- }
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x04, 0xFF);
- if (ret < 0)
- goto bail;
- }
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, RXHSSTATUS(chnl), 0x04, 0xFF);
- if (ret < 0)
- goto bail;
- }
/*
- * delay for max possible number of steps, with slop.
+ * Delay for max possible number of steps, with slop.
* Each step is about 4usec.
*/
udelay(415);
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x00, 0xFF);
- if (ret < 0)
- goto bail;
- }
+
+ ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x00, 0xFF);
+
bail:
return ret;
}
#define RELOCK_FIRST_MS 3
#define RXLSPPM(chan) EPB_LOC(chan, 0, 2)
-void ipath_toggle_rclkrls(struct ipath_devdata *dd)
+void toggle_7220_rclkrls(struct qib_devdata *dd)
{
int loc = RXLSPPM(0) | EPB_GLOBAL_WR;
int ret;
ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
if (ret < 0)
- ipath_dev_err(dd, "RCLKRLS failed to clear D7\n");
+ qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
else {
udelay(1);
ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
@@ -1354,109 +1323,91 @@ void ipath_toggle_rclkrls(struct ipath_devdata *dd)
udelay(1);
ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
if (ret < 0)
- ipath_dev_err(dd, "RCLKRLS failed to clear D7\n");
+ qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
else {
udelay(1);
ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
}
/* Now reset xgxs and IBC to complete the recovery */
- dd->ipath_f_xgxs_reset(dd);
+ dd->f_xgxs_reset(dd->pport);
}
/*
* Shut down the timer that polls for relock occasions, if needed.
- * this is "hooked" from ipath_7220_quiet_serdes(), which is called
- * just before ipath_shutdown_device() in ipath_driver.c shuts down all
+ * This is "hooked" from qib_7220_quiet_serdes(), which is called
+ * just before qib_shutdown_device() in qib_driver.c shuts down all
* the other timers.
*/
-void ipath_shutdown_relock_poll(struct ipath_devdata *dd)
+void shutdown_7220_relock_poll(struct qib_devdata *dd)
{
- struct ipath_relock *irp = &dd->ipath_relock_singleton;
- if (atomic_read(&irp->ipath_relock_timer_active)) {
- del_timer_sync(&irp->ipath_relock_timer);
- atomic_set(&irp->ipath_relock_timer_active, 0);
- }
+ if (dd->cspec->relock_timer_active)
+ del_timer_sync(&dd->cspec->relock_timer);
}
-static unsigned ipath_relock_by_timer = 1;
-module_param_named(relock_by_timer, ipath_relock_by_timer, uint,
- S_IWUSR | S_IRUGO);
+static unsigned qib_relock_by_timer = 1;
+module_param_named(relock_by_timer, qib_relock_by_timer, uint,
+ S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
-static void ipath_run_relock(unsigned long opaque)
+static void qib_run_relock(unsigned long opaque)
{
- struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
- struct ipath_relock *irp = &dd->ipath_relock_singleton;
- u64 val, ltstate;
-
- if (!(dd->ipath_flags & IPATH_INITTED)) {
- /* Not yet up, just reenable the timer for later */
- irp->ipath_relock_interval = HZ;
- mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
- return;
- }
+ struct qib_devdata *dd = (struct qib_devdata *)opaque;
+ struct qib_pportdata *ppd = dd->pport;
+ struct qib_chip_specific *cs = dd->cspec;
+ int timeoff;
/*
- * Check link-training state for "stuck" state.
+ * Check link-training state for "stuck" state, when down.
* if found, try relock and schedule another try at
* exponentially growing delay, maxed at one second.
* if not stuck, our work is done.
*/
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
- ltstate = ipath_ib_linktrstate(dd, val);
-
- if (ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT
- && ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
- int timeoff;
- /* Not up yet. Try again, if allowed by module-param */
- if (ipath_relock_by_timer) {
- if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)
- ipath_cdbg(VERBOSE, "Skip RELOCK in AUTONEG\n");
- else if (!(dd->ipath_flags & IPATH_IB_LINK_DISABLED)) {
- ipath_cdbg(VERBOSE, "RELOCK\n");
- ipath_toggle_rclkrls(dd);
- }
+ if ((dd->flags & QIB_INITTED) && !(ppd->lflags &
+ (QIBL_IB_AUTONEG_INPROG | QIBL_LINKINIT | QIBL_LINKARMED |
+ QIBL_LINKACTIVE))) {
+ if (qib_relock_by_timer) {
+ if (!(ppd->lflags & QIBL_IB_LINK_DISABLED))
+ toggle_7220_rclkrls(dd);
}
/* re-set timer for next check */
- timeoff = irp->ipath_relock_interval << 1;
+ timeoff = cs->relock_interval << 1;
if (timeoff > HZ)
timeoff = HZ;
- irp->ipath_relock_interval = timeoff;
-
- mod_timer(&irp->ipath_relock_timer, jiffies + timeoff);
- } else {
- /* Up, so no more need to check so often */
- mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
- }
+ cs->relock_interval = timeoff;
+ } else
+ timeoff = HZ;
+ mod_timer(&cs->relock_timer, jiffies + timeoff);
}
-void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup)
+void set_7220_relock_poll(struct qib_devdata *dd, int ibup)
{
- struct ipath_relock *irp = &dd->ipath_relock_singleton;
+ struct qib_chip_specific *cs = dd->cspec;
- if (ibup > 0) {
- /* we are now up, so relax timer to 1 second interval */
- if (atomic_read(&irp->ipath_relock_timer_active))
- mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
+ if (ibup) {
+ /* We are now up, relax timer to 1 second interval */
+ if (cs->relock_timer_active) {
+ cs->relock_interval = HZ;
+ mod_timer(&cs->relock_timer, jiffies + HZ);
+ }
} else {
/* Transition to down, (re-)set timer to short interval. */
- int timeout;
- timeout = (HZ * ((ibup == -1) ? 1000 : RELOCK_FIRST_MS))/1000;
+ unsigned int timeout;
+
+ timeout = msecs_to_jiffies(RELOCK_FIRST_MS);
if (timeout == 0)
timeout = 1;
/* If timer has not yet been started, do so. */
- if (atomic_inc_return(&irp->ipath_relock_timer_active) == 1) {
- init_timer(&irp->ipath_relock_timer);
- irp->ipath_relock_timer.function = ipath_run_relock;
- irp->ipath_relock_timer.data = (unsigned long) dd;
- irp->ipath_relock_interval = timeout;
- irp->ipath_relock_timer.expires = jiffies + timeout;
- add_timer(&irp->ipath_relock_timer);
+ if (!cs->relock_timer_active) {
+ cs->relock_timer_active = 1;
+ init_timer(&cs->relock_timer);
+ cs->relock_timer.function = qib_run_relock;
+ cs->relock_timer.data = (unsigned long) dd;
+ cs->relock_interval = timeout;
+ cs->relock_timer.expires = jiffies + timeout;
+ add_timer(&cs->relock_timer);
} else {
- irp->ipath_relock_interval = timeout;
- mod_timer(&irp->ipath_relock_timer, jiffies + timeout);
- atomic_dec(&irp->ipath_relock_timer_active);
+ cs->relock_interval = timeout;
+ mod_timer(&cs->relock_timer, jiffies + timeout);
}
}
}
-
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220_img.c b/drivers/infiniband/hw/qib/qib_sd7220_img.c
index 5ef59da9270a..a1118fbd2370 100644
--- a/drivers/infiniband/hw/ipath/ipath_sd7220_img.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220_img.c
@@ -38,11 +38,10 @@
#include <linux/pci.h>
#include <linux/delay.h>
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-#include "ipath_7220.h"
+#include "qib.h"
+#include "qib_7220.h"
-static unsigned char ipath_sd7220_ib_img[] = {
+static unsigned char qib_sd7220_ib_img[] = {
/*0000*/0x02, 0x0A, 0x29, 0x02, 0x0A, 0x87, 0xE5, 0xE6,
0x30, 0xE6, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F,
/*0010*/0x00, 0xE5, 0xE2, 0x30, 0xE4, 0x04, 0x7E, 0x01,
@@ -1069,14 +1068,14 @@ static unsigned char ipath_sd7220_ib_img[] = {
0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x81
};
-int ipath_sd7220_ib_load(struct ipath_devdata *dd)
+int qib_sd7220_ib_load(struct qib_devdata *dd)
{
- return ipath_sd7220_prog_ld(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
- sizeof(ipath_sd7220_ib_img), 0);
+ return qib_sd7220_prog_ld(dd, IB_7220_SERDES, qib_sd7220_ib_img,
+ sizeof(qib_sd7220_ib_img), 0);
}
-int ipath_sd7220_ib_vfy(struct ipath_devdata *dd)
+int qib_sd7220_ib_vfy(struct qib_devdata *dd)
{
- return ipath_sd7220_prog_vfy(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
- sizeof(ipath_sd7220_ib_img), 0);
+ return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, qib_sd7220_ib_img,
+ sizeof(qib_sd7220_ib_img), 0);
}
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
new file mode 100644
index 000000000000..b8456881f7f6
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -0,0 +1,973 @@
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+/* default pio off, sdma on */
+static ushort sdma_descq_cnt = 256;
+module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
+MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
+
+/*
+ * Bits defined in the send DMA descriptor.
+ */
+#define SDMA_DESC_LAST (1ULL << 11)
+#define SDMA_DESC_FIRST (1ULL << 12)
+#define SDMA_DESC_DMA_HEAD (1ULL << 13)
+#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
+#define SDMA_DESC_INTR (1ULL << 15)
+#define SDMA_DESC_COUNT_LSB 16
+#define SDMA_DESC_GEN_LSB 30
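+
+/*
+ * A descriptor is two qwords.  Per the fields above and make_sdma_desc()
+ * below, qw[0] packs:
+ *   bits  0-10  SDmaBufOffset[12:2]
+ *   bits 11-15  LAST / FIRST / DMA_HEAD / USE_LARGE_BUF / INTR flags
+ *   bits 16-26  SDmaDwordCount[10:0]
+ *   bits 30-31  SDmaGeneration[1:0]
+ *   bits 32-63  SDmaPhyAddr[31:0] (dword aligned)
+ * qw[1] holds SDmaPhyAddr[47:32].
+ */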
+
+char *qib_sdma_state_names[] = {
+ [qib_sdma_state_s00_hw_down] = "s00_HwDown",
+ [qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
+ [qib_sdma_state_s20_idle] = "s20_Idle",
+ [qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
+ [qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
+ [qib_sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
+ [qib_sdma_state_s99_running] = "s99_Running",
+};
+
+char *qib_sdma_event_names[] = {
+ [qib_sdma_event_e00_go_hw_down] = "e00_GoHwDown",
+ [qib_sdma_event_e10_go_hw_start] = "e10_GoHwStart",
+ [qib_sdma_event_e20_hw_started] = "e20_HwStarted",
+ [qib_sdma_event_e30_go_running] = "e30_GoRunning",
+ [qib_sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
+ [qib_sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
+ [qib_sdma_event_e60_hw_halted] = "e60_HwHalted",
+ [qib_sdma_event_e70_go_idle] = "e70_GoIdle",
+ [qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted",
+ [qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted",
+ [qib_sdma_event_e90_timer_tick] = "e90_TimerTick",
+};
+
+/* declare all statics here rather than keep sorting */
+static int alloc_sdma(struct qib_pportdata *);
+static void sdma_complete(struct kref *);
+static void sdma_finalput(struct qib_sdma_state *);
+static void sdma_get(struct qib_sdma_state *);
+static void sdma_put(struct qib_sdma_state *);
+static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
+static void sdma_start_sw_clean_up(struct qib_pportdata *);
+static void sdma_sw_clean_up_task(unsigned long);
+static void unmap_desc(struct qib_pportdata *, unsigned);
+
+static void sdma_get(struct qib_sdma_state *ss)
+{
+ kref_get(&ss->kref);
+}
+
+static void sdma_complete(struct kref *kref)
+{
+ struct qib_sdma_state *ss =
+ container_of(kref, struct qib_sdma_state, kref);
+
+ complete(&ss->comp);
+}
+
+static void sdma_put(struct qib_sdma_state *ss)
+{
+ kref_put(&ss->kref, sdma_complete);
+}
+
+static void sdma_finalput(struct qib_sdma_state *ss)
+{
+ sdma_put(ss);
+ wait_for_completion(&ss->comp);
+}
+
+/*
+ * Complete all the sdma requests on the active list, in the correct
+ * order, and with appropriate processing. Called when cleaning up
+ * after sdma shutdown, and when new sdma requests are submitted for
+ * a link that is down. This matches what is done for requests
+ * that complete normally; it is just the full list.
+ *
+ * Must be called with sdma_lock held.
+ */
+static void clear_sdma_activelist(struct qib_pportdata *ppd)
+{
+ struct qib_sdma_txreq *txp, *txp_next;
+
+ list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
+ list_del_init(&txp->list);
+ if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
+ unsigned idx;
+
+ idx = txp->start_idx;
+ while (idx != txp->next_descq_idx) {
+ unmap_desc(ppd, idx);
+ if (++idx == ppd->sdma_descq_cnt)
+ idx = 0;
+ }
+ }
+ if (txp->callback)
+ (*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
+ }
+}
+
+static void sdma_sw_clean_up_task(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ /*
+ * At this point, the following should always be true:
+ * - We are halted, so no more descriptors are getting retired.
+ * - We are not running, so no one is submitting new work.
+ * - Only we can send the e40_sw_cleaned, so we can't start
+ * running again until we say so. So, the active list and
+ * descq are ours to play with.
+ */
+
+ /* Process all retired requests. */
+ qib_sdma_make_progress(ppd);
+
+ clear_sdma_activelist(ppd);
+
+ /*
+ * Resync count of added and removed. It is VERY important that
+ * sdma_descq_removed NEVER decrement - user_sdma depends on it.
+ */
+ ppd->sdma_descq_removed = ppd->sdma_descq_added;
+
+ /*
+ * Reset our notion of head and tail.
+ * Note that the HW registers will be reset when switching states
+ * due to calling __qib_sdma_process_event() below.
+ */
+ ppd->sdma_descq_tail = 0;
+ ppd->sdma_descq_head = 0;
+ ppd->sdma_head_dma[0] = 0;
+ ppd->sdma_generation = 0;
+
+ __qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);
+
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+/*
+ * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
+ * as a result of send buffer errors or send DMA descriptor errors.
+ * We want to disarm the buffers in these cases.
+ */
+static void sdma_hw_start_up(struct qib_pportdata *ppd)
+{
+ struct qib_sdma_state *ss = &ppd->sdma_state;
+ unsigned bufno;
+
+ for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
+ ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));
+
+ ppd->dd->f_sdma_hw_start_up(ppd);
+}
+
+static void sdma_sw_tear_down(struct qib_pportdata *ppd)
+{
+ struct qib_sdma_state *ss = &ppd->sdma_state;
+
+ /* Releasing this reference means the state machine has stopped. */
+ sdma_put(ss);
+}
+
+static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
+{
+ tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
+}
+
+static void sdma_set_state(struct qib_pportdata *ppd,
+ enum qib_sdma_states next_state)
+{
+ struct qib_sdma_state *ss = &ppd->sdma_state;
+ struct sdma_set_state_action *action = ss->set_state_action;
+ unsigned op = 0;
+
+ /* debugging bookkeeping */
+ ss->previous_state = ss->current_state;
+ ss->previous_op = ss->current_op;
+
+ ss->current_state = next_state;
+
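+ /*
+ * Fold the new state's action-table entries into a single sendctrl
+ * op mask, which is handed to the chip-specific f_sdma_sendctrl()
+ * hook below.
+ */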
+ if (action[next_state].op_enable)
+ op |= QIB_SDMA_SENDCTRL_OP_ENABLE;
+
+ if (action[next_state].op_intenable)
+ op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;
+
+ if (action[next_state].op_halt)
+ op |= QIB_SDMA_SENDCTRL_OP_HALT;
+
+ if (action[next_state].op_drain)
+ op |= QIB_SDMA_SENDCTRL_OP_DRAIN;
+
+ if (action[next_state].go_s99_running_tofalse)
+ ss->go_s99_running = 0;
+
+ if (action[next_state].go_s99_running_totrue)
+ ss->go_s99_running = 1;
+
+ ss->current_op = op;
+
+ ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
+}
+
+static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
+{
+ __le64 *descqp = &ppd->sdma_descq[head].qw[0];
+ u64 desc[2];
+ dma_addr_t addr;
+ size_t len;
+
+ desc[0] = le64_to_cpu(descqp[0]);
+ desc[1] = le64_to_cpu(descqp[1]);
+
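+ /*
+ * Undo the packing done by make_sdma_desc(): the upper address bits
+ * are in qw[1], the lower 32 in the top half of qw[0], and shifting
+ * the dword count (bits 16-26 of qw[0]) down by 14 leaves it
+ * multiplied by 4, i.e. the mapped length in bytes.
+ */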
+ addr = (desc[1] << 32) | (desc[0] >> 32);
+ len = (desc[0] >> 14) & (0x7ffULL << 2);
+ dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
+}
+
+static int alloc_sdma(struct qib_pportdata *ppd)
+{
+ ppd->sdma_descq_cnt = sdma_descq_cnt;
+ if (!ppd->sdma_descq_cnt)
+ ppd->sdma_descq_cnt = 256;
+
+ /* Allocate memory for SendDMA descriptor FIFO */
+ ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
+ ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
+ GFP_KERNEL);
+
+ if (!ppd->sdma_descq) {
+ qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor "
+ "FIFO memory\n");
+ goto bail;
+ }
+
+ /* Allocate memory for DMA of head register to memory */
+ ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
+ PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
+ if (!ppd->sdma_head_dma) {
+ qib_dev_err(ppd->dd, "failed to allocate SendDMA "
+ "head memory\n");
+ goto cleanup_descq;
+ }
+ ppd->sdma_head_dma[0] = 0;
+ return 0;
+
+cleanup_descq:
+ dma_free_coherent(&ppd->dd->pcidev->dev,
+ ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
+ ppd->sdma_descq_phys);
+ ppd->sdma_descq = NULL;
+ ppd->sdma_descq_phys = 0;
+bail:
+ ppd->sdma_descq_cnt = 0;
+ return -ENOMEM;
+}
+
+static void free_sdma(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+
+ if (ppd->sdma_head_dma) {
+ dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+ (void *)ppd->sdma_head_dma,
+ ppd->sdma_head_phys);
+ ppd->sdma_head_dma = NULL;
+ ppd->sdma_head_phys = 0;
+ }
+
+ if (ppd->sdma_descq) {
+ dma_free_coherent(&dd->pcidev->dev,
+ ppd->sdma_descq_cnt * sizeof(u64[2]),
+ ppd->sdma_descq, ppd->sdma_descq_phys);
+ ppd->sdma_descq = NULL;
+ ppd->sdma_descq_phys = 0;
+ }
+}
+
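+/*
+ * Build the two qwords of a send DMA descriptor.  For example, an
+ * 8-dword transfer of a buffer at 0x12345678, placed at dword offset 2,
+ * with sdma_generation == 1, packs as
+ *   sdmadesc[0] = (0x12345678ULL << 32) | (1ULL << 30) | (8ULL << 16) | 2
+ *   sdmadesc[1] = 0
+ * The FIRST/LAST and other flag bits are OR'ed in by the caller.
+ */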
+static inline void make_sdma_desc(struct qib_pportdata *ppd,
+ u64 *sdmadesc, u64 addr, u64 dwlen,
+ u64 dwoffset)
+{
+
+ WARN_ON(addr & 3);
+ /* SDmaPhyAddr[47:32] */
+ sdmadesc[1] = addr >> 32;
+ /* SDmaPhyAddr[31:0] */
+ sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
+ /* SDmaGeneration[1:0] */
+ sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
+ SDMA_DESC_GEN_LSB;
+ /* SDmaDwordCount[10:0] */
+ sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
+ /* SDmaBufOffset[12:2] */
+ sdmadesc[0] |= dwoffset & 0x7ffULL;
+}
+
+/* sdma_lock must be held */
+int qib_sdma_make_progress(struct qib_pportdata *ppd)
+{
+ struct list_head *lp = NULL;
+ struct qib_sdma_txreq *txp = NULL;
+ struct qib_devdata *dd = ppd->dd;
+ int progress = 0;
+ u16 hwhead;
+ u16 idx = 0;
+
+ hwhead = dd->f_sdma_gethead(ppd);
+
+ /*
+ * The reason for some of the complexity of this code is that
+ * not all descriptors have corresponding txps. So, we have to
+ * be able to skip over descs until we wander into the range of
+ * the next txp on the list.
+ */
+
+ if (!list_empty(&ppd->sdma_activelist)) {
+ lp = ppd->sdma_activelist.next;
+ txp = list_entry(lp, struct qib_sdma_txreq, list);
+ idx = txp->start_idx;
+ }
+
+ while (ppd->sdma_descq_head != hwhead) {
+ /* if desc is part of this txp, unmap if needed */
+ if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
+ (idx == ppd->sdma_descq_head)) {
+ unmap_desc(ppd, ppd->sdma_descq_head);
+ if (++idx == ppd->sdma_descq_cnt)
+ idx = 0;
+ }
+
+ /* increment dequeued desc count */
+ ppd->sdma_descq_removed++;
+
+ /* advance head, wrap if needed */
+ if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
+ ppd->sdma_descq_head = 0;
+
+ /* if now past this txp's descs, do the callback */
+ if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
+ /* remove from active list */
+ list_del_init(&txp->list);
+ if (txp->callback)
+ (*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
+ /* see if there is another txp */
+ if (list_empty(&ppd->sdma_activelist))
+ txp = NULL;
+ else {
+ lp = ppd->sdma_activelist.next;
+ txp = list_entry(lp, struct qib_sdma_txreq,
+ list);
+ idx = txp->start_idx;
+ }
+ }
+ progress = 1;
+ }
+ if (progress)
+ qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
+ return progress;
+}
+
+/*
+ * This is called from interrupt context.
+ */
+void qib_sdma_intr(struct qib_pportdata *ppd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ __qib_sdma_intr(ppd);
+
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+void __qib_sdma_intr(struct qib_pportdata *ppd)
+{
+ if (__qib_sdma_running(ppd))
+ qib_sdma_make_progress(ppd);
+}
+
+int qib_setup_sdma(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+ int ret = 0;
+
+ ret = alloc_sdma(ppd);
+ if (ret)
+ goto bail;
+
+ /* set consistent sdma state */
+ ppd->dd->f_sdma_init_early(ppd);
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+ /* set up reference counting */
+ kref_init(&ppd->sdma_state.kref);
+ init_completion(&ppd->sdma_state.comp);
+
+ ppd->sdma_generation = 0;
+ ppd->sdma_descq_head = 0;
+ ppd->sdma_descq_removed = 0;
+ ppd->sdma_descq_added = 0;
+
+ INIT_LIST_HEAD(&ppd->sdma_activelist);
+
+ tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
+ (unsigned long)ppd);
+
+ ret = dd->f_init_sdma_regs(ppd);
+ if (ret)
+ goto bail_alloc;
+
+ qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);
+
+ return 0;
+
+bail_alloc:
+ qib_teardown_sdma(ppd);
+bail:
+ return ret;
+}
+
+void qib_teardown_sdma(struct qib_pportdata *ppd)
+{
+ qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
+
+ /*
+ * This waits for the state machine to exit so it is not
+ * necessary to kill the sdma_sw_clean_up_task to make sure
+ * it is not running.
+ */
+ sdma_finalput(&ppd->sdma_state);
+
+ free_sdma(ppd);
+}
+
+int qib_sdma_running(struct qib_pportdata *ppd)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ ret = __qib_sdma_running(ppd);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Complete a request when sdma is not running; it is likely the only
+ * request, but to simplify the code we always queue it and then process
+ * the full activelist. Processing the entire list ensures that this
+ * particular request still gets its callback, in the correct order.
+ *
+ * Must be called with sdma_lock held.
+ */
+static void complete_sdma_err_req(struct qib_pportdata *ppd,
+ struct qib_verbs_txreq *tx)
+{
+ atomic_inc(&tx->qp->s_dma_busy);
+ /* no sdma descriptors, so no unmap_desc */
+ tx->txreq.start_idx = 0;
+ tx->txreq.next_descq_idx = 0;
+ list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
+ clear_sdma_activelist(ppd);
+}
+
+/*
+ * This function queues one IB packet onto the send DMA queue per call.
+ * The caller is responsible for checking:
+ * 1) The number of send DMA descriptor entries is less than the size of
+ * the descriptor queue.
+ * 2) The IB SGE addresses and lengths are 32-bit aligned
+ * (except possibly the last SGE's length)
+ * 3) The SGE addresses are suitable for passing to dma_map_single().
+ */
+int qib_sdma_verbs_send(struct qib_pportdata *ppd,
+ struct qib_sge_state *ss, u32 dwords,
+ struct qib_verbs_txreq *tx)
+{
+ unsigned long flags;
+ struct qib_sge *sge;
+ struct qib_qp *qp;
+ int ret = 0;
+ u16 tail;
+ __le64 *descqp;
+ u64 sdmadesc[2];
+ u32 dwoffset;
+ dma_addr_t addr;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
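+ /*
+ * If the ring lacks room for this request's descriptors, completed
+ * ones are retired via qib_sdma_make_progress(); if that freed any,
+ * control jumps back to this label, otherwise we give up as "busy".
+ */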
+retry:
+ if (unlikely(!__qib_sdma_running(ppd))) {
+ complete_sdma_err_req(ppd, tx);
+ goto unlock;
+ }
+
+ if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
+ if (qib_sdma_make_progress(ppd))
+ goto retry;
+ if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
+ ppd->dd->f_sdma_set_desc_cnt(ppd,
+ ppd->sdma_descq_cnt / 2);
+ goto busy;
+ }
+
+ dwoffset = tx->hdr_dwords;
+ make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);
+
+ sdmadesc[0] |= SDMA_DESC_FIRST;
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
+ sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
+
+ /* write to the descq */
+ tail = ppd->sdma_descq_tail;
+ descqp = &ppd->sdma_descq[tail].qw[0];
+ *descqp++ = cpu_to_le64(sdmadesc[0]);
+ *descqp++ = cpu_to_le64(sdmadesc[1]);
+
+ /* increment the tail */
+ if (++tail == ppd->sdma_descq_cnt) {
+ tail = 0;
+ descqp = &ppd->sdma_descq[0].qw[0];
+ ++ppd->sdma_generation;
+ }
+
+ tx->txreq.start_idx = tail;
+
+ sge = &ss->sge;
+ while (dwords) {
+ u32 dw;
+ u32 len;
+
+ len = dwords << 2;
+ if (len > sge->length)
+ len = sge->length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ dw = (len + 3) >> 2;
+ addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
+ dw << 2, DMA_TO_DEVICE);
+ if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
+ goto unmap;
+ sdmadesc[0] = 0;
+ make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
+ /* SDmaUseLargeBuf has to be set in every descriptor */
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
+ sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
+ /* write to the descq */
+ *descqp++ = cpu_to_le64(sdmadesc[0]);
+ *descqp++ = cpu_to_le64(sdmadesc[1]);
+
+ /* increment the tail */
+ if (++tail == ppd->sdma_descq_cnt) {
+ tail = 0;
+ descqp = &ppd->sdma_descq[0].qw[0];
+ ++ppd->sdma_generation;
+ }
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+
+ dwoffset += dw;
+ dwords -= dw;
+ }
+
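+ /*
+ * descqp now points just past the most recently written descriptor
+ * (or at slot 0 if the tail wrapped), so step back one descriptor
+ * (two qwords) to tag it as the last one of this request.
+ */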
+ if (!tail)
+ descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
+ descqp -= 2;
+ descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
+ descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
+ descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
+
+ atomic_inc(&tx->qp->s_dma_busy);
+ tx->txreq.next_descq_idx = tail;
+ ppd->dd->f_sdma_update_tail(ppd, tail);
+ ppd->sdma_descq_added += tx->txreq.sg_count;
+ list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
+ goto unlock;
+
+unmap:
+ for (;;) {
+ if (!tail)
+ tail = ppd->sdma_descq_cnt - 1;
+ else
+ tail--;
+ if (tail == ppd->sdma_descq_tail)
+ break;
+ unmap_desc(ppd, tail);
+ }
+ qp = tx->qp;
+ qib_put_txreq(tx);
+ spin_lock(&qp->s_lock);
+ if (qp->ibqp.qp_type == IB_QPT_RC) {
+ /* XXX what about error sending RDMA read responses? */
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)
+ qib_error_qp(qp, IB_WC_GENERAL_ERR);
+ } else if (qp->s_wqe)
+ qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
+ spin_unlock(&qp->s_lock);
+ /* return zero to process the next send work request */
+ goto unlock;
+
+busy:
+ qp = tx->qp;
+ spin_lock(&qp->s_lock);
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+ struct qib_ibdev *dev;
+
+ /*
+ * If we couldn't queue the DMA request, save the info
+ * and try again later rather than destroying the
+ * buffer and undoing the side effects of the copy.
+ */
+ tx->ss = ss;
+ tx->dwords = dwords;
+ qp->s_tx = tx;
+ dev = &ppd->dd->verbs_dev;
+ spin_lock(&dev->pending_lock);
+ if (list_empty(&qp->iowait)) {
+ struct qib_ibport *ibp;
+
+ ibp = &ppd->ibport_data;
+ ibp->n_dmawait++;
+ qp->s_flags |= QIB_S_WAIT_DMA_DESC;
+ list_add_tail(&qp->iowait, &dev->dmawait);
+ }
+ spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~QIB_S_BUSY;
+ spin_unlock(&qp->s_lock);
+ ret = -EBUSY;
+ } else {
+ spin_unlock(&qp->s_lock);
+ qib_put_txreq(tx);
+ }
+unlock:
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ return ret;
+}
+
+void qib_sdma_process_event(struct qib_pportdata *ppd,
+ enum qib_sdma_events event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ __qib_sdma_process_event(ppd, event);
+
+ if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
+ qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
+
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+void __qib_sdma_process_event(struct qib_pportdata *ppd,
+ enum qib_sdma_events event)
+{
+ struct qib_sdma_state *ss = &ppd->sdma_state;
+
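+ /*
+ * Dispatch on (current state, event).  Events a state does not act
+ * on simply break with the state unchanged; the event is recorded
+ * in ss->last_event at the end.
+ */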
+ switch (ss->current_state) {
+ case qib_sdma_state_s00_hw_down:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ break;
+ case qib_sdma_event_e30_go_running:
+ /*
+ * If down, but running requested (usually the result
+ * of link up), then we need to start up.
+ * This can happen, e.g., when hw down is requested
+ * while bringing the link up with traffic active
+ * on the 7220.
+ */
+ ss->go_s99_running = 1;
+ /* fall through and start dma engine */
+ case qib_sdma_event_e10_go_hw_start:
+ /* This reference means the state machine is started */
+ sdma_get(&ppd->sdma_state);
+ sdma_set_state(ppd,
+ qib_sdma_state_s10_hw_start_up_wait);
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ sdma_sw_tear_down(ppd);
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ break;
+ case qib_sdma_event_e70_go_idle:
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s10_hw_start_up_wait:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ sdma_sw_tear_down(ppd);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ sdma_set_state(ppd, ss->go_s99_running ?
+ qib_sdma_state_s99_running :
+ qib_sdma_state_s20_idle);
+ break;
+ case qib_sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ break;
+ case qib_sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s20_idle:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ sdma_sw_tear_down(ppd);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e30_go_running:
+ sdma_set_state(ppd, qib_sdma_state_s99_running);
+ ss->go_s99_running = 1;
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ break;
+ case qib_sdma_event_e70_go_idle:
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s30_sw_clean_up_wait:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ sdma_set_state(ppd,
+ qib_sdma_state_s10_hw_start_up_wait);
+ sdma_hw_start_up(ppd);
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ break;
+ case qib_sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s40_hw_clean_up_wait:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ sdma_set_state(ppd,
+ qib_sdma_state_s30_sw_clean_up_wait);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ break;
+ case qib_sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s50_hw_halt_wait:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ sdma_set_state(ppd,
+ qib_sdma_state_s40_hw_clean_up_wait);
+ ppd->dd->f_sdma_hw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s99_running:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e30_go_running:
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ sdma_set_state(ppd,
+ qib_sdma_state_s30_sw_clean_up_wait);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e70_go_idle:
+ sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
+ ss->go_s99_running = 0;
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ sdma_set_state(ppd,
+ qib_sdma_state_s30_sw_clean_up_wait);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+ }
+
+ ss->last_event = event;
+}
diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c
new file mode 100644
index 000000000000..c3ec8efc2ed8
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_srq.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "qib_verbs.h"
+
+/**
+ * qib_post_srq_receive - post a receive on a shared receive queue
+ * @ibsrq: the SRQ to post the receive on
+ * @wr: the list of work requests to post
+ * @bad_wr: A pointer to the first WR to cause a problem is put here
+ *
+ * This may be called from interrupt context.
+ */
+int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct qib_srq *srq = to_isrq(ibsrq);
+ struct qib_rwq *wq;
+ unsigned long flags;
+ int ret;
+
+ for (; wr; wr = wr->next) {
+ struct qib_rwqe *wqe;
+ u32 next;
+ int i;
+
+ if ((unsigned) wr->num_sge > srq->rq.max_sge) {
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&srq->rq.lock, flags);
+ wq = srq->rq.wq;
+ next = wq->head + 1;
+ if (next >= srq->rq.size)
+ next = 0;
+ if (next == wq->tail) {
+ spin_unlock_irqrestore(&srq->rq.lock, flags);
+ *bad_wr = wr;
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ wqe = get_rwqe_ptr(&srq->rq, wq->head);
+ wqe->wr_id = wr->wr_id;
+ wqe->num_sge = wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++)
+ wqe->sg_list[i] = wr->sg_list[i];
+ /* Make sure queue entry is written before the head index. */
+ smp_wmb();
+ wq->head = next;
+ spin_unlock_irqrestore(&srq->rq.lock, flags);
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_create_srq - create a shared receive queue
+ * @ibpd: the protection domain of the SRQ to create
+ * @srq_init_attr: the attributes of the SRQ
+ * @udata: data from libibverbs when creating a user SRQ
+ */
+struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
+{
+ struct qib_ibdev *dev = to_idev(ibpd->device);
+ struct qib_srq *srq;
+ u32 sz;
+ struct ib_srq *ret;
+
+ if (srq_init_attr->attr.max_sge == 0 ||
+ srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
+ srq_init_attr->attr.max_wr == 0 ||
+ srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) {
+ ret = ERR_PTR(-EINVAL);
+ goto done;
+ }
+
+ srq = kmalloc(sizeof(*srq), GFP_KERNEL);
+ if (!srq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto done;
+ }
+
+ /*
+ * Need to use vmalloc() if we want to support large #s of entries.
+ */
+ srq->rq.size = srq_init_attr->attr.max_wr + 1;
+ srq->rq.max_sge = srq_init_attr->attr.max_sge;
+ sz = sizeof(struct ib_sge) * srq->rq.max_sge +
+ sizeof(struct qib_rwqe);
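+ /*
+ * Each ring slot is a struct qib_rwqe followed by max_sge SGEs; the
+ * ring header plus all slots come from one user-mappable allocation
+ * so the queue can later be handed to userspace via qib_mmap().
+ */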
+ srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz);
+ if (!srq->rq.wq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_srq;
+ }
+
+ /*
+ * Return the address of the RWQ as the offset to mmap.
+ * See qib_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ int err;
+ u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz;
+
+ srq->ip =
+ qib_create_mmap_info(dev, s, ibpd->uobject->context,
+ srq->rq.wq);
+ if (!srq->ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_wq;
+ }
+
+ err = ib_copy_to_udata(udata, &srq->ip->offset,
+ sizeof(srq->ip->offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ } else
+ srq->ip = NULL;
+
+ /*
+ * ib_create_srq() will initialize srq->ibsrq.
+ */
+ spin_lock_init(&srq->rq.lock);
+ srq->rq.wq->head = 0;
+ srq->rq.wq->tail = 0;
+ srq->limit = srq_init_attr->attr.srq_limit;
+
+ spin_lock(&dev->n_srqs_lock);
+ if (dev->n_srqs_allocated == ib_qib_max_srqs) {
+ spin_unlock(&dev->n_srqs_lock);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ dev->n_srqs_allocated++;
+ spin_unlock(&dev->n_srqs_lock);
+
+ if (srq->ip) {
+ spin_lock_irq(&dev->pending_lock);
+ list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
+ ret = &srq->ibsrq;
+ goto done;
+
+bail_ip:
+ kfree(srq->ip);
+bail_wq:
+ vfree(srq->rq.wq);
+bail_srq:
+ kfree(srq);
+done:
+ return ret;
+}
+
+/**
+ * qib_modify_srq - modify a shared receive queue
+ * @ibsrq: the SRQ to modify
+ * @attr: the new attributes of the SRQ
+ * @attr_mask: indicates which attributes to modify
+ * @udata: user data for libibverbs.so
+ */
+int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata)
+{
+ struct qib_srq *srq = to_isrq(ibsrq);
+ struct qib_rwq *wq;
+ int ret = 0;
+
+ if (attr_mask & IB_SRQ_MAX_WR) {
+ struct qib_rwq *owq;
+ struct qib_rwqe *p;
+ u32 sz, size, n, head, tail;
+
+ /* Check that the requested sizes are below the limits. */
+ if ((attr->max_wr > ib_qib_max_srq_wrs) ||
+ ((attr_mask & IB_SRQ_LIMIT) ?
+ attr->srq_limit : srq->limit) > attr->max_wr) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ sz = sizeof(struct qib_rwqe) +
+ srq->rq.max_sge * sizeof(struct ib_sge);
+ size = attr->max_wr + 1;
+ wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz);
+ if (!wq) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ /* Check that we can write the offset to mmap. */
+ if (udata && udata->inlen >= sizeof(__u64)) {
+ __u64 offset_addr;
+ __u64 offset = 0;
+
+ ret = ib_copy_from_udata(&offset_addr, udata,
+ sizeof(offset_addr));
+ if (ret)
+ goto bail_free;
+ udata->outbuf =
+ (void __user *) (unsigned long) offset_addr;
+ ret = ib_copy_to_udata(udata, &offset,
+ sizeof(offset));
+ if (ret)
+ goto bail_free;
+ }
+
+ spin_lock_irq(&srq->rq.lock);
+ /*
+ * validate head and tail pointer values and compute
+ * the number of remaining WQEs.
+ */
+ owq = srq->rq.wq;
+ head = owq->head;
+ tail = owq->tail;
+ if (head >= srq->rq.size || tail >= srq->rq.size) {
+ ret = -EINVAL;
+ goto bail_unlock;
+ }
+ n = head;
+ if (n < tail)
+ n += srq->rq.size - tail;
+ else
+ n -= tail;
+ if (size <= n) {
+ ret = -EINVAL;
+ goto bail_unlock;
+ }
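+ /*
+ * Copy the outstanding WQEs from the old ring into the new one,
+ * re-packed contiguously from slot 0; n counts how many were
+ * carried over and becomes the new head index.
+ */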
+ n = 0;
+ p = wq->wq;
+ while (tail != head) {
+ struct qib_rwqe *wqe;
+ int i;
+
+ wqe = get_rwqe_ptr(&srq->rq, tail);
+ p->wr_id = wqe->wr_id;
+ p->num_sge = wqe->num_sge;
+ for (i = 0; i < wqe->num_sge; i++)
+ p->sg_list[i] = wqe->sg_list[i];
+ n++;
+ p = (struct qib_rwqe *)((char *) p + sz);
+ if (++tail >= srq->rq.size)
+ tail = 0;
+ }
+ srq->rq.wq = wq;
+ srq->rq.size = size;
+ wq->head = n;
+ wq->tail = 0;
+ if (attr_mask & IB_SRQ_LIMIT)
+ srq->limit = attr->srq_limit;
+ spin_unlock_irq(&srq->rq.lock);
+
+ vfree(owq);
+
+ if (srq->ip) {
+ struct qib_mmap_info *ip = srq->ip;
+ struct qib_ibdev *dev = to_idev(srq->ibsrq.device);
+ u32 s = sizeof(struct qib_rwq) + size * sz;
+
+ qib_update_mmap_info(dev, ip, s, wq);
+
+ /*
+ * Return the offset to mmap.
+ * See qib_mmap() for details.
+ */
+ if (udata && udata->inlen >= sizeof(__u64)) {
+ ret = ib_copy_to_udata(udata, &ip->offset,
+ sizeof(ip->offset));
+ if (ret)
+ goto bail;
+ }
+
+ /*
+ * Put user mapping info onto the pending list
+ * unless it already is on the list.
+ */
+ spin_lock_irq(&dev->pending_lock);
+ if (list_empty(&ip->pending_mmaps))
+ list_add(&ip->pending_mmaps,
+ &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+ } else if (attr_mask & IB_SRQ_LIMIT) {
+ spin_lock_irq(&srq->rq.lock);
+ if (attr->srq_limit >= srq->rq.size)
+ ret = -EINVAL;
+ else
+ srq->limit = attr->srq_limit;
+ spin_unlock_irq(&srq->rq.lock);
+ }
+ goto bail;
+
+bail_unlock:
+ spin_unlock_irq(&srq->rq.lock);
+bail_free:
+ vfree(wq);
+bail:
+ return ret;
+}
+
+int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+ struct qib_srq *srq = to_isrq(ibsrq);
+
+ attr->max_wr = srq->rq.size - 1;
+ attr->max_sge = srq->rq.max_sge;
+ attr->srq_limit = srq->limit;
+ return 0;
+}
+
+/**
+ * qib_destroy_srq - destroy a shared receive queue
+ * @ibsrq: the SRQ to destroy
+ */
+int qib_destroy_srq(struct ib_srq *ibsrq)
+{
+ struct qib_srq *srq = to_isrq(ibsrq);
+ struct qib_ibdev *dev = to_idev(ibsrq->device);
+
+ spin_lock(&dev->n_srqs_lock);
+ dev->n_srqs_allocated--;
+ spin_unlock(&dev->n_srqs_lock);
+ if (srq->ip)
+ kref_put(&srq->ip->ref, qib_release_mmap_info);
+ else
+ vfree(srq->rq.wq);
+ kfree(srq);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
new file mode 100644
index 000000000000..dab4d9f4a2cc
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -0,0 +1,691 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/ctype.h>
+
+#include "qib.h"
+
+/**
+ * qib_parse_ushort - parse an unsigned short value in an arbitrary base
+ * @str: the string containing the number
+ * @valp: where to put the result
+ *
+ * Returns the number of bytes consumed, or negative value on error.
+ */
+static int qib_parse_ushort(const char *str, unsigned short *valp)
+{
+ unsigned long val;
+ char *end;
+ int ret;
+
+ if (!isdigit(str[0])) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ val = simple_strtoul(str, &end, 0);
+
+ if (val > 0xffff) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ *valp = val;
+
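+ /*
+ * end points at the first non-digit character; counting it as well
+ * (usually the trailing '\n' of a sysfs write) gives the number of
+ * bytes consumed.
+ */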
+ ret = end + 1 - str;
+ if (ret == 0)
+ ret = -EINVAL;
+
+bail:
+ return ret;
+}
+
+/* start of per-port functions */
+/*
+ * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
+ */
+static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret;
+
+ ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT);
+ ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+ return ret;
+}
+
+static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
+ size_t count)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret;
+ u16 val;
+
+ ret = qib_parse_ushort(buf, &val);
+
+ /*
+ * Set the "intentional" heartbeat enable per either of
+ * "Enable" and "Auto", as these are normally set together.
+ * This bit is consulted when leaving loopback mode,
+ * because entering loopback mode overrides it and automatically
+ * disables heartbeat.
+ */
+ if (ret >= 0)
+ ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
+ if (ret < 0)
+ qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
+ size_t count)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret = count, r;
+
+ r = dd->f_set_ib_loopback(ppd, buf);
+ if (r < 0)
+ ret = r;
+
+ return ret;
+}
+
+static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
+ size_t count)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret;
+ u16 val;
+
+ ret = qib_parse_ushort(buf, &val);
+ if (ret > 0)
+ qib_set_led_override(ppd, val);
+ else
+ qib_dev_err(dd, "attempt to set invalid LED override\n");
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
+{
+ ssize_t ret;
+
+ if (!ppd->statusp)
+ ret = -EINVAL;
+ else
+ ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
+ (unsigned long long) *(ppd->statusp));
+ return ret;
+}
+
+/*
+ * For userland compatibility, these offsets must remain fixed.
+ * They are strings for QIB_STATUS_*
+ */
+static const char *qib_status_str[] = {
+ "Initted",
+ "",
+ "",
+ "",
+ "",
+ "Present",
+ "IB_link_up",
+ "IB_configured",
+ "",
+ "Fatal_Hardware_Error",
+ NULL,
+};
+
+static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
+{
+ int i, any;
+ u64 s;
+ ssize_t ret;
+
+ if (!ppd->statusp) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ s = *(ppd->statusp);
+ *buf = '\0';
+ for (any = i = 0; s && qib_status_str[i]; i++) {
+ if (s & 1) {
+ /* if overflow */
+ if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
+ break;
+ if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
+ PAGE_SIZE)
+ break;
+ any = 1;
+ }
+ s >>= 1;
+ }
+ if (any)
+ strlcat(buf, "\n", PAGE_SIZE);
+
+ ret = strlen(buf);
+
+bail:
+ return ret;
+}
+
+/* end of per-port functions */
+
+/*
+ * Start of per-port file structures and support code
+ * Because we are fitting into other infrastructure, we have to supply the
+ * full set of kobject/sysfs_ops structures and routines.
+ */
+#define QIB_PORT_ATTR(name, mode, show, store) \
+ static struct qib_port_attr qib_port_attr_##name = \
+ __ATTR(name, mode, show, store)
+
+struct qib_port_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct qib_pportdata *, char *);
+ ssize_t (*store)(struct qib_pportdata *, const char *, size_t);
+};
+
+QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback);
+QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override);
+QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
+ store_hrtbt_enb);
+QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL);
+QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL);
+
+static struct attribute *port_default_attributes[] = {
+ &qib_port_attr_loopback.attr,
+ &qib_port_attr_led_override.attr,
+ &qib_port_attr_hrtbt_enable.attr,
+ &qib_port_attr_status.attr,
+ &qib_port_attr_status_str.attr,
+ NULL
+};
+
+static ssize_t qib_portattr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct qib_port_attr *pattr =
+ container_of(attr, struct qib_port_attr, attr);
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, pport_kobj);
+
+ return pattr->show(ppd, buf);
+}
+
+static ssize_t qib_portattr_store(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t len)
+{
+ struct qib_port_attr *pattr =
+ container_of(attr, struct qib_port_attr, attr);
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, pport_kobj);
+
+ return pattr->store(ppd, buf, len);
+}
+
+static void qib_port_release(struct kobject *kobj)
+{
+ /* nothing to do since memory is freed by qib_free_devdata() */
+}
+
+static const struct sysfs_ops qib_port_ops = {
+ .show = qib_portattr_show,
+ .store = qib_portattr_store,
+};
+
+static struct kobj_type qib_port_ktype = {
+ .release = qib_port_release,
+ .sysfs_ops = &qib_port_ops,
+ .default_attrs = port_default_attributes
+};
+
+/* Start sl2vl */
+
+#define QIB_SL2VL_ATTR(N) \
+ static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
+ .attr = { .name = __stringify(N), .mode = 0444 }, \
+ .sl = N \
+ }
+
+struct qib_sl2vl_attr {
+ struct attribute attr;
+ int sl;
+};
+
+QIB_SL2VL_ATTR(0);
+QIB_SL2VL_ATTR(1);
+QIB_SL2VL_ATTR(2);
+QIB_SL2VL_ATTR(3);
+QIB_SL2VL_ATTR(4);
+QIB_SL2VL_ATTR(5);
+QIB_SL2VL_ATTR(6);
+QIB_SL2VL_ATTR(7);
+QIB_SL2VL_ATTR(8);
+QIB_SL2VL_ATTR(9);
+QIB_SL2VL_ATTR(10);
+QIB_SL2VL_ATTR(11);
+QIB_SL2VL_ATTR(12);
+QIB_SL2VL_ATTR(13);
+QIB_SL2VL_ATTR(14);
+QIB_SL2VL_ATTR(15);
+
+static struct attribute *sl2vl_default_attributes[] = {
+ &qib_sl2vl_attr_0.attr,
+ &qib_sl2vl_attr_1.attr,
+ &qib_sl2vl_attr_2.attr,
+ &qib_sl2vl_attr_3.attr,
+ &qib_sl2vl_attr_4.attr,
+ &qib_sl2vl_attr_5.attr,
+ &qib_sl2vl_attr_6.attr,
+ &qib_sl2vl_attr_7.attr,
+ &qib_sl2vl_attr_8.attr,
+ &qib_sl2vl_attr_9.attr,
+ &qib_sl2vl_attr_10.attr,
+ &qib_sl2vl_attr_11.attr,
+ &qib_sl2vl_attr_12.attr,
+ &qib_sl2vl_attr_13.attr,
+ &qib_sl2vl_attr_14.attr,
+ &qib_sl2vl_attr_15.attr,
+ NULL
+};
+
+static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct qib_sl2vl_attr *sattr =
+ container_of(attr, struct qib_sl2vl_attr, attr);
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, sl2vl_kobj);
+ struct qib_ibport *qibp = &ppd->ibport_data;
+
+ return sprintf(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
+}
+
+static const struct sysfs_ops qib_sl2vl_ops = {
+ .show = sl2vl_attr_show,
+};
+
+static struct kobj_type qib_sl2vl_ktype = {
+ .release = qib_port_release,
+ .sysfs_ops = &qib_sl2vl_ops,
+ .default_attrs = sl2vl_default_attributes
+};
+
+/* End sl2vl */
+
+/* Start diag_counters */
+
+#define QIB_DIAGC_ATTR(N) \
+ static struct qib_diagc_attr qib_diagc_attr_##N = { \
+ .attr = { .name = __stringify(N), .mode = 0444 }, \
+ .counter = offsetof(struct qib_ibport, n_##N) \
+ }
+
+struct qib_diagc_attr {
+ struct attribute attr;
+ size_t counter;
+};
+
+QIB_DIAGC_ATTR(rc_resends);
+QIB_DIAGC_ATTR(rc_acks);
+QIB_DIAGC_ATTR(rc_qacks);
+QIB_DIAGC_ATTR(rc_delayed_comp);
+QIB_DIAGC_ATTR(seq_naks);
+QIB_DIAGC_ATTR(rdma_seq);
+QIB_DIAGC_ATTR(rnr_naks);
+QIB_DIAGC_ATTR(other_naks);
+QIB_DIAGC_ATTR(rc_timeouts);
+QIB_DIAGC_ATTR(loop_pkts);
+QIB_DIAGC_ATTR(pkt_drops);
+QIB_DIAGC_ATTR(dmawait);
+QIB_DIAGC_ATTR(unaligned);
+QIB_DIAGC_ATTR(rc_dupreq);
+QIB_DIAGC_ATTR(rc_seqnak);
+
+static struct attribute *diagc_default_attributes[] = {
+ &qib_diagc_attr_rc_resends.attr,
+ &qib_diagc_attr_rc_acks.attr,
+ &qib_diagc_attr_rc_qacks.attr,
+ &qib_diagc_attr_rc_delayed_comp.attr,
+ &qib_diagc_attr_seq_naks.attr,
+ &qib_diagc_attr_rdma_seq.attr,
+ &qib_diagc_attr_rnr_naks.attr,
+ &qib_diagc_attr_other_naks.attr,
+ &qib_diagc_attr_rc_timeouts.attr,
+ &qib_diagc_attr_loop_pkts.attr,
+ &qib_diagc_attr_pkt_drops.attr,
+ &qib_diagc_attr_dmawait.attr,
+ &qib_diagc_attr_unaligned.attr,
+ &qib_diagc_attr_rc_dupreq.attr,
+ &qib_diagc_attr_rc_seqnak.attr,
+ NULL
+};
+
+static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct qib_diagc_attr *dattr =
+ container_of(attr, struct qib_diagc_attr, attr);
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, diagc_kobj);
+ struct qib_ibport *qibp = &ppd->ibport_data;
+
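+ /*
+ * dattr->counter holds offsetof(struct qib_ibport, n_<name>) as set
+ * up by QIB_DIAGC_ATTR(), so the u32 counter is read straight out
+ * of the per-port ibport data.
+ */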
+ return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
+}
+
+static const struct sysfs_ops qib_diagc_ops = {
+ .show = diagc_attr_show,
+};
+
+static struct kobj_type qib_diagc_ktype = {
+ .release = qib_port_release,
+ .sysfs_ops = &qib_diagc_ops,
+ .default_attrs = diagc_default_attributes
+};
+
+/* End diag_counters */
+
+/* end of per-port file structures and support code */
+
+/*
+ * Start of per-unit (or driver, in some cases, but replicated
+ * per unit) functions (these get a device *)
+ */
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+
+ return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
+}
+
+static ssize_t show_hca(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ int ret;
+
+ if (!dd->boardname)
+ ret = -EINVAL;
+ else
+ ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
+ return ret;
+}
+
+static ssize_t show_version(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ /* The string printed here is already newline-terminated. */
+ return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
+}
+
+static ssize_t show_boardversion(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+
+ /* The string printed here is already newline-terminated. */
+ return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
+}
+
+
+static ssize_t show_localbus_info(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+
+ /* The string printed here is already newline-terminated. */
+ return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
+}
+
+
+static ssize_t show_nctxts(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+
+ /* Return the number of user ports (contexts) available. */
+ return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts -
+ dd->first_user_ctxt);
+}
+
+static ssize_t show_serial(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+
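+ /*
+ * dd->serial is a fixed-size field that may not be NUL terminated,
+ * so terminate the copy explicitly before appending the newline.
+ */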
+ buf[sizeof dd->serial] = '\0';
+ memcpy(buf, dd->serial, sizeof dd->serial);
+ strcat(buf, "\n");
+ return strlen(buf);
+}
+
+static ssize_t store_chip_reset(struct device *device,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ int ret;
+
+ if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ ret = qib_reset_device(dd->unit);
+bail:
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t show_logged_errs(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ int idx, count;
+
+ /* force consistency with actual EEPROM */
+ if (qib_update_eeprom_log(dd) != 0)
+ return -ENXIO;
+
+ count = 0;
+ for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+ count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
+ dd->eep_st_errs[idx],
+ idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
+ }
+
+ return count;
+}
+
+/*
+ * Dump tempsense registers in decimal, to ease shell scripting.
+ */
+static ssize_t show_tempsense(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ int ret;
+ int idx;
+ u8 regvals[8];
+
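+ /* read the temperature sense registers (index 6 is skipped); stop on the first error */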
+ ret = -ENXIO;
+ for (idx = 0; idx < 8; ++idx) {
+ if (idx == 6)
+ continue;
+ ret = dd->f_tempsense_rd(dd, idx);
+ if (ret < 0)
+ break;
+ regvals[idx] = ret;
+ }
+ if (idx == 8)
+ ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
+ *(signed char *)(regvals),
+ *(signed char *)(regvals + 1),
+ regvals[2], regvals[3],
+ *(signed char *)(regvals + 5),
+ *(signed char *)(regvals + 7));
+ return ret;
+}
+
+/*
+ * end of per-unit (or driver, in some cases, but replicated
+ * per unit) functions
+ */
+
+/* start of per-unit file structures and support code */
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
+static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
+static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
+static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
+static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
+static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
+static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
+static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
+static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
+
+static struct device_attribute *qib_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_hca_type,
+ &dev_attr_board_id,
+ &dev_attr_version,
+ &dev_attr_nctxts,
+ &dev_attr_serial,
+ &dev_attr_boardversion,
+ &dev_attr_logged_errors,
+ &dev_attr_tempsense,
+ &dev_attr_localbus_info,
+ &dev_attr_chip_reset,
+};
+
+int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
+ struct kobject *kobj)
+{
+ struct qib_pportdata *ppd;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ int ret;
+
+ if (!port_num || port_num > dd->num_pports) {
+ qib_dev_err(dd, "Skipping infiniband class with "
+ "invalid port %u\n", port_num);
+ ret = -ENODEV;
+ goto bail;
+ }
+ ppd = &dd->pport[port_num - 1];
+
+ ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
+ "linkcontrol");
+ if (ret) {
+ qib_dev_err(dd, "Skipping linkcontrol sysfs info, "
+ "(err %d) port %u\n", ret, port_num);
+ goto bail;
+ }
+ kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
+
+ ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
+ "sl2vl");
+ if (ret) {
+ qib_dev_err(dd, "Skipping sl2vl sysfs info, "
+ "(err %d) port %u\n", ret, port_num);
+ goto bail_sl;
+ }
+ kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
+
+ ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
+ "diag_counters");
+ if (ret) {
+ qib_dev_err(dd, "Skipping diag_counters sysfs info, "
+ "(err %d) port %u\n", ret, port_num);
+ goto bail_diagc;
+ }
+ kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
+
+ return 0;
+
+bail_diagc:
+ kobject_put(&ppd->sl2vl_kobj);
+bail_sl:
+ kobject_put(&ppd->pport_kobj);
+bail:
+ return ret;
+}
+
+/*
+ * Register and create our files in /sys/class/infiniband.
+ */
+int qib_verbs_register_sysfs(struct qib_devdata *dd)
+{
+ struct ib_device *dev = &dd->verbs_dev.ibdev;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
+ ret = device_create_file(&dev->dev, qib_attributes[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Unregister and remove our files in /sys/class/infiniband.
+ */
+void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
+{
+ struct qib_pportdata *ppd;
+ int i;
+
+ for (i = 0; i < dd->num_pports; i++) {
+ ppd = &dd->pport[i];
+ kobject_put(&ppd->pport_kobj);
+ kobject_put(&ppd->sl2vl_kobj);
+ kobject_put(&ppd->diagc_kobj);
+ }
+}
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c
new file mode 100644
index 000000000000..6f31ca5039db
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_twsi.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+/*
+ * QLogic_IB "Two Wire Serial Interface" driver.
+ * Originally written for a not-quite-i2c serial eeprom, which is
+ * still used on some supported boards. Later boards have added a
+ * variety of other uses, most board-specific, so the bit-banging
+ * part has been split off to this file, while the other parts
+ * have been moved to chip-specific files.
+ *
+ * We have also dropped all pretense of a fully generic (e.g. pretend
+ * we don't know whether '1' is the higher voltage) interface, as
+ * the restrictions of the generic i2c interface (e.g. no access from
+ * driver itself) make it unsuitable for this use.
+ */
+
+#define READ_CMD 1
+#define WRITE_CMD 0
+
+/**
+ * i2c_wait_for_writes - wait for a write
+ * @dd: the qlogic_ib device
+ *
+ * We use this instead of udelay directly, so we can make sure
+ * that previous register writes have been flushed all the way
+ * to the chip. Since we are delaying anyway, the cost doesn't
+ * hurt, and makes the bit twiddling more regular
+ */
+static void i2c_wait_for_writes(struct qib_devdata *dd)
+{
+ /*
+ * implicit read of EXTStatus is as good as explicit
+ * read of scratch, if all we want to do is flush
+ * writes.
+ */
+ dd->f_gpio_mod(dd, 0, 0, 0);
+ rmb(); /* inlined, so prevent compiler reordering */
+}
+
+/*
+ * QSFP modules are allowed to hold SCL low for 500uSec. Allow twice that
+ * for "almost compliant" modules
+ */
+#define SCL_WAIT_USEC 1000
+
+/* TWSI_BUF_WAIT_USEC is the time the bus must be free between a STOP or
+ * ACK and the next START. Nominally 20, but some chips need more.
+ */
+#define TWSI_BUF_WAIT_USEC 60
+
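+/*
+ * SCL is driven low by setting the GPIO direction bit and released by
+ * clearing it (the line then floats high). On release, honor clock
+ * stretching by waiting up to SCL_WAIT_USEC for a slave holding SCL low.
+ */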
+static void scl_out(struct qib_devdata *dd, u8 bit)
+{
+ u32 mask;
+
+ udelay(1);
+
+ mask = 1UL << dd->gpio_scl_num;
+
+ /* SCL is meant to be open-drain, so never set "OUT", just DIR */
+ dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
+
+ /*
+ * Allow for slow slaves by simple
+ * delay for falling edge, sampling on rise.
+ */
+ if (!bit)
+ udelay(2);
+ else {
+ int rise_usec;
+ for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
+ if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
+ break;
+ udelay(2);
+ }
+ if (rise_usec <= 0)
+ qib_dev_err(dd, "SCL interface stuck low > %d uSec\n",
+ SCL_WAIT_USEC);
+ }
+ i2c_wait_for_writes(dd);
+}
+
+static void sda_out(struct qib_devdata *dd, u8 bit)
+{
+ u32 mask;
+
+ mask = 1UL << dd->gpio_sda_num;
+
+ /* SDA is meant to be open-drain, so never set "OUT", just DIR */
+ dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
+
+ i2c_wait_for_writes(dd);
+ udelay(2);
+}
+
+static u8 sda_in(struct qib_devdata *dd, int wait)
+{
+ int bnum;
+ u32 read_val, mask;
+
+ bnum = dd->gpio_sda_num;
+ mask = (1UL << bnum);
+ /* SDA is meant to be open-drain, so never set "OUT", just DIR */
+ dd->f_gpio_mod(dd, 0, 0, mask);
+ read_val = dd->f_gpio_mod(dd, 0, 0, 0);
+ if (wait)
+ i2c_wait_for_writes(dd);
+ return (read_val & mask) >> bnum;
+}
+
+/**
+ * i2c_ackrcv - see if ack following write is true
+ * @dd: the qlogic_ib device
+ */
+static int i2c_ackrcv(struct qib_devdata *dd)
+{
+ u8 ack_received;
+
+ /* AT ENTRY SCL = LOW */
+ /* change direction, ignore data */
+ ack_received = sda_in(dd, 1);
+ scl_out(dd, 1);
+ ack_received = sda_in(dd, 1) == 0;
+ scl_out(dd, 0);
+ return ack_received;
+}
+
+static void stop_cmd(struct qib_devdata *dd);
+
+/**
+ * rd_byte - read a byte, sending STOP on last, else ACK
+ * @dd: the qlogic_ib device
+ * @last: send STOP after this byte (instead of ACK) when nonzero
+ *
+ * Returns byte shifted out of device
+ */
+static int rd_byte(struct qib_devdata *dd, int last)
+{
+ int bit_cntr, data;
+
+ data = 0;
+
+ for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
+ data <<= 1;
+ scl_out(dd, 1);
+ data |= sda_in(dd, 0);
+ scl_out(dd, 0);
+ }
+ if (last) {
+ scl_out(dd, 1);
+ stop_cmd(dd);
+ } else {
+ sda_out(dd, 0);
+ scl_out(dd, 1);
+ scl_out(dd, 0);
+ sda_out(dd, 1);
+ }
+ return data;
+}
+
+/**
+ * wr_byte - write a byte, one bit at a time
+ * @dd: the qlogic_ib device
+ * @data: the byte to write
+ *
+ * Returns 0 if we got the following ack, otherwise 1
+ */
+static int wr_byte(struct qib_devdata *dd, u8 data)
+{
+ int bit_cntr;
+ u8 bit;
+
+ for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
+ bit = (data >> bit_cntr) & 1;
+ sda_out(dd, bit);
+ scl_out(dd, 1);
+ scl_out(dd, 0);
+ }
+ return !i2c_ackrcv(dd);
+}
+
+/*
+ * issue TWSI start sequence:
+ * (both clock/data high, clock high, data low while clock is high)
+ */
+static void start_seq(struct qib_devdata *dd)
+{
+ sda_out(dd, 1);
+ scl_out(dd, 1);
+ sda_out(dd, 0);
+ udelay(1);
+ scl_out(dd, 0);
+}
+
+/**
+ * stop_seq - transmit the stop sequence
+ * @dd: the qlogic_ib device
+ *
+ * (both clock/data low, clock high, data high while clock is high)
+ */
+static void stop_seq(struct qib_devdata *dd)
+{
+ scl_out(dd, 0);
+ sda_out(dd, 0);
+ scl_out(dd, 1);
+ sda_out(dd, 1);
+}
+
+/**
+ * stop_cmd - transmit the stop condition
+ * @dd: the qlogic_ib device
+ *
+ * (both clock/data low, clock high, data high while clock is high)
+ */
+static void stop_cmd(struct qib_devdata *dd)
+{
+ stop_seq(dd);
+ udelay(TWSI_BUF_WAIT_USEC);
+}
+
+/**
+ * qib_twsi_reset - reset I2C communication
+ * @dd: the qlogic_ib device
+ */
+
+int qib_twsi_reset(struct qib_devdata *dd)
+{
+ int clock_cycles_left = 9;
+ int was_high = 0;
+ u32 pins, mask;
+
+ /* Both SCL and SDA should be high. If not, there
+ * is something wrong.
+ */
+ mask = (1UL << dd->gpio_scl_num) | (1UL << dd->gpio_sda_num);
+
+ /*
+ * Force pins to desired innocuous state.
+ * This is the default power-on state with out=0 and dir=0,
+ * so tri-stated and should be floating high (barring HW problems).
+ */
+ dd->f_gpio_mod(dd, 0, 0, mask);
+
+ /*
+ * Clock nine times to get all listeners into a sane state.
+ * If SDA does not go high at any point, we are wedged.
+ * One vendor recommends then issuing START followed by STOP.
+ * We cannot use our "normal" functions to do that, because
+ * if SCL drops between them, another vendor's part will
+ * wedge, dropping SDA and keeping it low forever, at the end of
+ * the next transaction (even if it was not the device addressed).
+ * So our START and STOP take place with SCL held high.
+ */
+ while (clock_cycles_left--) {
+ scl_out(dd, 0);
+ scl_out(dd, 1);
+ /* Note if SDA is high, but keep clocking to sync slave */
+ was_high |= sda_in(dd, 0);
+ }
+
+ if (was_high) {
+ /*
+ * We saw a high, which we hope means the slave is sync'd.
+ * Issue START, STOP, pause for T_BUF.
+ */
+
+ pins = dd->f_gpio_mod(dd, 0, 0, 0);
+ if ((pins & mask) != mask)
+ qib_dev_err(dd, "GPIO pins not at rest: %d\n",
+ pins & mask);
+ /* Drop SDA to issue START */
+ udelay(1); /* Guarantee .6 uSec setup */
+ sda_out(dd, 0);
+ udelay(1); /* Guarantee .6 uSec hold */
+ /* At this point, SCL is high, SDA low. Raise SDA for STOP */
+ sda_out(dd, 1);
+ udelay(TWSI_BUF_WAIT_USEC);
+ }
+
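+ /* a nonzero return means the bus is wedged (SDA never went high) */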
+ return !was_high;
+}
+
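+/* flag bits for qib_twsi_wr(): issue a START before and/or a STOP after the byte */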
+#define QIB_TWSI_START 0x100
+#define QIB_TWSI_STOP 0x200
+
+/* Write byte to TWSI, optionally prefixed with START or suffixed with
+ * STOP.
+ * returns 0 if OK (ACK received), else != 0
+ */
+static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
+{
+ int ret = 1;
+ if (flags & QIB_TWSI_START)
+ start_seq(dd);
+
+ ret = wr_byte(dd, data); /* Leaves SCL low (from i2c_ackrcv()) */
+
+ if (flags & QIB_TWSI_STOP)
+ stop_cmd(dd);
+ return ret;
+}
+
+/* Added functionality for IBA7220-based cards */
+#define QIB_TEMP_DEV 0x98
+
+/*
+ * qib_twsi_blk_rd
+ * Formerly called qib_eeprom_internal_read, and only used for eeprom,
+ * but now the general interface for data transfer from twsi devices.
+ * One vestige of its former role is that it recognizes a device
+ * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
+ * which responded to all TWSI device codes, interpreting them as
+ * address within device. On all other devices found on boards handled by
+ * this driver, the device is followed by a one-byte "address" which selects
+ * the "register" or "offset" within the device from which data should
+ * be read.
+ */
+int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr,
+ void *buffer, int len)
+{
+ int ret;
+ u8 *bp = buffer;
+
+ ret = 1;
+
+ if (dev == QIB_TWSI_NO_DEV) {
+ /* legacy not-really-I2C */
+ addr = (addr << 1) | READ_CMD;
+ ret = qib_twsi_wr(dd, addr, QIB_TWSI_START);
+ } else {
+ /* Actual I2C */
+ ret = qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START);
+ if (ret) {
+ stop_cmd(dd);
+ ret = 1;
+ goto bail;
+ }
+ /*
+ * SFF spec claims we do _not_ stop after the addr
+ * but simply issue a start with the "read" dev-addr.
+ * Since we are implicitly waiting for ACK here,
+ * we need t_buf (nominally 20uSec) before that start,
+ * and cannot rely on the delay built in to the STOP
+ */
+ ret = qib_twsi_wr(dd, addr, 0);
+ udelay(TWSI_BUF_WAIT_USEC);
+
+ if (ret) {
+ qib_dev_err(dd,
+ "Failed to write interface read addr %02X\n",
+ addr);
+ ret = 1;
+ goto bail;
+ }
+ ret = qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START);
+ }
+ if (ret) {
+ stop_cmd(dd);
+ ret = 1;
+ goto bail;
+ }
+
+ /*
+ * block devices keep clocking data out as long as we ACK,
+ * automatically incrementing the address. Some have "pages"
+ * whose boundaries will not be crossed, but the handling
+ * of these is left to the caller, who is in a better
+ * position to know.
+ */
+ while (len-- > 0) {
+ /*
+ * Get and store data, sending ACK if length remaining,
+ * else STOP
+ */
+ *bp++ = rd_byte(dd, !len);
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/*
+ * qib_twsi_blk_wr
+ * Formerly called qib_eeprom_internal_write, and only used for eeprom,
+ * but now the general interface for data transfer to twsi devices.
+ * One vestige of its former role is that it recognizes a device
+ * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
+ * which responded to all TWSI device codes, interpreting them as
+ * address within device. On all other devices found on boards handled by
+ * this driver, the device is followed by a one-byte "address" which selects
+ * the "register" or "offset" within the device to which data should
+ * be written.
+ */
+int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
+ const void *buffer, int len)
+{
+ int sub_len;
+ const u8 *bp = buffer;
+ int max_wait_time, i;
+ int ret;
+ ret = 1;
+
+ while (len > 0) {
+ if (dev == QIB_TWSI_NO_DEV) {
+ if (qib_twsi_wr(dd, (addr << 1) | WRITE_CMD,
+ QIB_TWSI_START)) {
+ goto failed_write;
+ }
+ } else {
+ /* Real I2C */
+ if (qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START))
+ goto failed_write;
+ ret = qib_twsi_wr(dd, addr, 0);
+ if (ret) {
+ qib_dev_err(dd, "Failed to write interface"
+ " write addr %02X\n", addr);
+ goto failed_write;
+ }
+ }
+
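+ /* write at most 4 bytes per transaction, then poll the device for completion */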
+ sub_len = min(len, 4);
+ addr += sub_len;
+ len -= sub_len;
+
+ for (i = 0; i < sub_len; i++)
+ if (qib_twsi_wr(dd, *bp++, 0))
+ goto failed_write;
+
+ stop_cmd(dd);
+
+ /*
+ * Wait for write complete by waiting for a successful
+ * read (the chip replies with a zero after the write
+ * cmd completes, and before it writes to the eeprom).
+ * The start cmd for the read will fail the ACK until
+ * the writes have completed. We do this inline to avoid
+ * the debug prints that are in the real read routine
+ * if the startcmd fails.
+ * We also use the proper device address, so it doesn't matter
+ * whether we have real eeprom_dev. Legacy likes any address.
+ */
+ max_wait_time = 100;
+ while (qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START)) {
+ stop_cmd(dd);
+ if (!--max_wait_time)
+ goto failed_write;
+ }
+ /* now read (and ignore) the resulting byte */
+ rd_byte(dd, 1);
+ }
+
+ ret = 0;
+ goto bail;
+
+failed_write:
+ stop_cmd(dd);
+ ret = 1;
+
+bail:
+ return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
new file mode 100644
index 000000000000..f7eb1ddff5f3
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -0,0 +1,557 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+static unsigned qib_hol_timeout_ms = 3000;
+module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
+MODULE_PARM_DESC(hol_timeout_ms,
+ "duration of user app suspension after link failure");
+
+unsigned qib_sdma_fetch_arb = 1;
+module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
+MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
+
+/**
+ * qib_disarm_piobufs - cancel a range of PIO buffers
+ * @dd: the qlogic_ib device
+ * @first: the first PIO buffer to cancel
+ * @cnt: the number of PIO buffers to cancel
+ *
+ * Cancel a range of PIO buffers. Used at user process close,
+ * in case it died while writing to a PIO buffer.
+ */
+void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
+{
+ unsigned long flags;
+ unsigned i;
+ unsigned last;
+
+ last = first + cnt;
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ for (i = first; i < last; i++) {
+ __clear_bit(i, dd->pio_need_disarm);
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
+ }
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+}
+
+/*
+ * This is called by a user process when it sees the DISARM_BUFS event
+ * bit is set.
+ */
+int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned i;
+ unsigned last;
+ unsigned n = 0;
+
+ last = rcd->pio_base + rcd->piocnt;
+ /*
+ * Don't need uctxt_lock here, since user has called in to us.
+ * Clear at start in case more interrupts set bits while we
+ * are disarming
+ */
+ if (rcd->user_event_mask) {
+ /*
+ * subctxt_cnt is 0 if not shared, so do base
+ * separately, first, then remaining subctxt, if any
+ */
+ clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
+ for (i = 1; i < rcd->subctxt_cnt; i++)
+ clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[i]);
+ }
+ spin_lock_irq(&dd->pioavail_lock);
+ for (i = rcd->pio_base; i < last; i++) {
+ if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
+ n++;
+ dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
+ }
+ }
+ spin_unlock_irq(&dd->pioavail_lock);
+ return 0;
+}
+
+static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
+{
+ struct qib_pportdata *ppd;
+ unsigned pidx;
+
+ for (pidx = 0; pidx < dd->num_pports; pidx++) {
+ ppd = dd->pport + pidx;
+ if (i >= ppd->sdma_state.first_sendbuf &&
+ i < ppd->sdma_state.last_sendbuf)
+ return ppd;
+ }
+ return NULL;
+}
+
+/*
+ * Return true if send buffer is being used by a user context.
+ * Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect
+ */
+static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
+{
+ struct qib_ctxtdata *rcd;
+ unsigned ctxt;
+ int ret = 0;
+
+ spin_lock(&dd->uctxt_lock);
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
+ rcd = dd->rcd[ctxt];
+ if (!rcd || bufn < rcd->pio_base ||
+ bufn >= rcd->pio_base + rcd->piocnt)
+ continue;
+ if (rcd->user_event_mask) {
+ int i;
+ /*
+ * subctxt_cnt is 0 if not shared, so do base
+ * separately, first, then remaining subctxt, if any
+ */
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[0]);
+ for (i = 1; i < rcd->subctxt_cnt; i++)
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[i]);
+ }
+ ret = 1;
+ break;
+ }
+ spin_unlock(&dd->uctxt_lock);
+
+ return ret;
+}
+
+/*
+ * Disarm a set of send buffers. If the buffer might be actively being
+ * written to, mark the buffer to be disarmed later when it is not being
+ * written to.
+ *
+ * This should only be called from the IRQ error handler.
+ */
+void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
+ unsigned cnt)
+{
+ struct qib_pportdata *ppd, *pppd[dd->num_pports];
+ unsigned i;
+ unsigned long flags;
+
+ for (i = 0; i < dd->num_pports; i++)
+ pppd[i] = NULL;
+
+ for (i = 0; i < cnt; i++) {
+ int which;
+ if (!test_bit(i, mask))
+ continue;
+ /*
+ * If the buffer is owned by the DMA hardware,
+ * reset the DMA engine.
+ */
+ ppd = is_sdma_buf(dd, i);
+ if (ppd) {
+ pppd[ppd->port] = ppd;
+ continue;
+ }
+ /*
+ * If the kernel is writing the buffer or the buffer is
+ * owned by a user process, we can't clear it yet.
+ */
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ if (test_bit(i, dd->pio_writing) ||
+ (!test_bit(i << 1, dd->pioavailkernel) &&
+ find_ctxt(dd, i))) {
+ __set_bit(i, dd->pio_need_disarm);
+ which = 0;
+ } else {
+ which = 1;
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
+ }
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+ }
+
+ /* do cancel_sends once per port that had sdma piobufs in error */
+ for (i = 0; i < dd->num_pports; i++)
+ if (pppd[i])
+ qib_cancel_sends(pppd[i]);
+}
+
+/**
+ * update_send_bufs - update shadow copy of the PIO availability map
+ * @dd: the qlogic_ib device
+ *
+ * called whenever our local copy indicates we have run out of send buffers
+ */
+static void update_send_bufs(struct qib_devdata *dd)
+{
+ unsigned long flags;
+ unsigned i;
+ const unsigned piobregs = dd->pioavregs;
+
+ /*
+ * If the generation (check) bits have changed, then we update the
+ * busy bit for the corresponding PIO buffer. This algorithm will
+ * modify positions to the value they already have in some cases
+ * (i.e., no change), but it's faster than changing only the bits
+ * that have changed.
+ *
+ * We would like to do this atomically, to avoid spinlocks in the
+ * critical send path, but that's not really possible, given the
+ * type of changes, and that this routine could be called on
+ * multiple CPUs simultaneously, so we lock in this routine only,
+ * to avoid conflicting updates; all we change is the shadow, and
+ * it's a single 64-bit memory location, so by definition the update
+ * is atomic in terms of what other CPUs can see in testing the
+ * bits. The spin_lock overhead isn't too bad, since it only
+ * happens when all buffers are in use, so only cpu overhead, not
+ * latency or bandwidth is affected.
+ */
+ if (!dd->pioavailregs_dma)
+ return;
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ for (i = 0; i < piobregs; i++) {
+ u64 pchbusy, pchg, piov, pnew;
+
+ piov = le64_to_cpu(dd->pioavailregs_dma[i]);
+ pchg = dd->pioavailkernel[i] &
+ ~(dd->pioavailshadow[i] ^ piov);
+ pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
+ if (pchg && (pchbusy & dd->pioavailshadow[i])) {
+ pnew = dd->pioavailshadow[i] & ~pchbusy;
+ pnew |= piov & pchbusy;
+ dd->pioavailshadow[i] = pnew;
+ }
+ }
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+}
+
+/*
+ * Debugging code and stats updates if no pio buffers available.
+ */
+static noinline void no_send_bufs(struct qib_devdata *dd)
+{
+ dd->upd_pio_shadow = 1;
+
+ /* not atomic, but if we lose a stat count in a while, that's OK */
+ qib_stats.sps_nopiobufs++;
+}
+
+/*
+ * Common code for normal driver send buffer allocation, and reserved
+ * allocation.
+ *
+ * Do appropriate marking as busy, etc.
+ * Returns buffer pointer if one is found, otherwise NULL.
+ */
+u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
+ u32 first, u32 last)
+{
+ unsigned i, j, updated = 0;
+ unsigned nbufs;
+ unsigned long flags;
+ unsigned long *shadow = dd->pioavailshadow;
+ u32 __iomem *buf;
+
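+ /*
+ * Two bits per buffer in the shadow: the generation ("check") bit at
+ * 2 * i and the busy bit at 2 * i + 1; claiming a buffer below sets
+ * the busy bit and flips the generation bit.
+ */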
+ if (!(dd->flags & QIB_PRESENT))
+ return NULL;
+
+ nbufs = last - first + 1; /* number in range to check */
+ if (dd->upd_pio_shadow) {
+ /*
+ * Minor optimization. If we had no buffers on last call,
+ * start out by doing the update; continue and do the scan even
+ * if no buffers were updated, to be paranoid.
+ */
+ update_send_bufs(dd);
+ updated++;
+ }
+ i = first;
+rescan:
+ /*
+ * While test_and_set_bit() is atomic, we do that and then the
+ * change_bit(), and the pair is not. See if this is the cause
+ * of the remaining armlaunch errors.
+ */
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ for (j = 0; j < nbufs; j++, i++) {
+ if (i > last)
+ i = first;
+ if (__test_and_set_bit((2 * i) + 1, shadow))
+ continue;
+ /* flip generation bit */
+ __change_bit(2 * i, shadow);
+ /* remember that the buffer can be written to now */
+ __set_bit(i, dd->pio_writing);
+ break;
+ }
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+
+ if (j == nbufs) {
+ if (!updated) {
+ /*
+ * First time through; shadow exhausted, but there may be
+ * buffers available; try an update and then rescan.
+ */
+ update_send_bufs(dd);
+ updated++;
+ i = first;
+ goto rescan;
+ }
+ no_send_bufs(dd);
+ buf = NULL;
+ } else {
+ if (i < dd->piobcnt2k)
+ buf = (u32 __iomem *)(dd->pio2kbase +
+ i * dd->palign);
+ else
+ buf = (u32 __iomem *)(dd->pio4kbase +
+ (i - dd->piobcnt2k) * dd->align4k);
+ if (pbufnum)
+ *pbufnum = i;
+ dd->upd_pio_shadow = 0;
+ }
+
+ return buf;
+}
+
+/*
+ * Record that the caller is finished writing to the buffer so we don't
+ * disarm it while it is being written and disarm it now if needed.
+ */
+void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ __clear_bit(n, dd->pio_writing);
+ if (__test_and_clear_bit(n, dd->pio_need_disarm))
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+}
+
+/**
+ * qib_chg_pioavailkernel - change which send buffers are available for kernel
+ * @dd: the qlogic_ib device
+ * @start: the starting send buffer number
+ * @len: the number of send buffers
+ * @avail: true if the buffers are available for kernel use, false otherwise
+ * @rcd: the context that owns the buffers (passed through to f_txchk_change)
+ */
+void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
+ unsigned len, u32 avail, struct qib_ctxtdata *rcd)
+{
+ unsigned long flags;
+ unsigned end;
+ unsigned ostart = start;
+
+ /* There are two bits per send buffer (busy and generation) */
+ start *= 2;
+ end = start + len * 2;
+
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ /* Set or clear the busy bit in the shadow. */
+ while (start < end) {
+ if (avail) {
+ unsigned long dma;
+ int i;
+
+ /*
+ * The BUSY bit will never be set, because we disarm
+ * the user buffers before we hand them back to the
+ * kernel. We do have to make sure the generation
+ * bit is set correctly in shadow, since it could
+ * have changed many times while allocated to user.
+ * We can't use the bitmap functions on the full
+ * dma array because it is always little-endian, so
+ * we have to flip to host-order first.
+ * BITS_PER_LONG is slightly wrong, since it's
+ * always 64 bits per register in chip...
+ * We only work on 64 bit kernels, so that's OK.
+ */
+ i = start / BITS_PER_LONG;
+ __clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
+ dd->pioavailshadow);
+ dma = (unsigned long)
+ le64_to_cpu(dd->pioavailregs_dma[i]);
+ if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
+ start) % BITS_PER_LONG, &dma))
+ __set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
+ start, dd->pioavailshadow);
+ else
+ __clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
+ + start, dd->pioavailshadow);
+ __set_bit(start, dd->pioavailkernel);
+ } else {
+ __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
+ dd->pioavailshadow);
+ __clear_bit(start, dd->pioavailkernel);
+ }
+ start += 2;
+ }
+
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+
+ dd->f_txchk_change(dd, ostart, len, avail, rcd);
+}
+
+/*
+ * Flush all sends that might be in the ready to send state, as well as any
+ * that are in the process of being sent. Used whenever we need to be
+ * sure the send side is idle. Cleans up all buffer state by canceling
+ * all pio buffers, and issuing an abort, which cleans up anything in the
+ * launch fifo. The cancel is superfluous on some chip versions, but
+ * it's safer to always do it.
+ * PIOAvail bits are updated by the chip as if a normal send had happened.
+ */
+void qib_cancel_sends(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ struct qib_ctxtdata *rcd;
+ unsigned long flags;
+ unsigned ctxt;
+ unsigned i;
+ unsigned last;
+
+ /*
+ * Tell PSM to disarm buffers again before trying to reuse them.
+ * We need to be sure the rcd doesn't change out from under us
+ * while we do so. We hold the two locks sequentially. We might
+ * needlessly set some need_disarm bits as a result, if the
+ * context is closed after we release the uctxt_lock, but that's
+ * fairly benign, and safer than nesting the locks.
+ */
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ rcd = dd->rcd[ctxt];
+ if (rcd && rcd->ppd == ppd) {
+ last = rcd->pio_base + rcd->piocnt;
+ if (rcd->user_event_mask) {
+ /*
+ * subctxt_cnt is 0 if not shared, so do base
+ * separately, first, then remaining subctxt,
+ * if any
+ */
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[0]);
+ for (i = 1; i < rcd->subctxt_cnt; i++)
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[i]);
+ }
+ i = rcd->pio_base;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ for (; i < last; i++)
+ __set_bit(i, dd->pio_need_disarm);
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+ } else
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ }
+
+ if (!(dd->flags & QIB_HAS_SEND_DMA))
+ dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
+ QIB_SENDCTRL_FLUSH);
+}
+
+/*
+ * Force an update of in-memory copy of the pioavail registers, when
+ * needed for any of a variety of reasons.
+ * If already off, this routine is a nop, on the assumption that the
+ * caller (or set of callers) will "do the right thing".
+ * This is a per-device operation, so just the first port.
+ */
+void qib_force_pio_avail_update(struct qib_devdata *dd)
+{
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+}
+
+void qib_hol_down(struct qib_pportdata *ppd)
+{
+ /*
+ * Cancel sends when the link goes DOWN so that we aren't doing it
+ * at INIT when we might be trying to send SMI packets.
+ */
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ qib_cancel_sends(ppd);
+}
+
+/*
+ * Link is at INIT.
+ * We start the HoL timer so we can detect stuck packets blocking SMP replies.
+ * Timer may already be running, so use mod_timer, not add_timer.
+ */
+void qib_hol_init(struct qib_pportdata *ppd)
+{
+ if (ppd->hol_state != QIB_HOL_INIT) {
+ ppd->hol_state = QIB_HOL_INIT;
+ mod_timer(&ppd->hol_timer,
+ jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
+ }
+}
+
+/*
+ * Link is up, continue any user processes, and ensure timer
+ * is a nop, if running. Let timer keep running, if set; it
+ * will nop when it sees the link is up.
+ */
+void qib_hol_up(struct qib_pportdata *ppd)
+{
+ ppd->hol_state = QIB_HOL_UP;
+}
+
+/*
+ * This is only called via the timer.
+ */
+void qib_hol_event(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
+ /* If hardware error, etc, skip. */
+ if (!(ppd->dd->flags & QIB_INITTED))
+ return;
+
+ if (ppd->hol_state != QIB_HOL_UP) {
+ /*
+ * Try to flush sends in case a stuck packet is blocking
+ * SMP replies.
+ */
+ qib_hol_down(ppd);
+ mod_timer(&ppd->hol_timer,
+ jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
+ }
+}
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
new file mode 100644
index 000000000000..6c7fe78cca64
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "qib.h"
+
+/* cut down ridiculously long IB macro names */
+#define OP(x) IB_OPCODE_UC_##x
+
+/**
+ * qib_make_uc_req - construct a request packet (SEND, RDMA write)
+ * @qp: a pointer to the QP
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ */
+int qib_make_uc_req(struct qib_qp *qp)
+{
+ struct qib_other_headers *ohdr;
+ struct qib_swqe *wqe;
+ unsigned long flags;
+ u32 hwords;
+ u32 bth0;
+ u32 len;
+ u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ int ret = 0;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
+ if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+ goto bail;
+ /* We are in the error state, flush the work request. */
+ if (qp->s_last == qp->s_head)
+ goto bail;
+ /* If DMAs are in progress, we can't flush immediately. */
+ if (atomic_read(&qp->s_dma_busy)) {
+ qp->s_flags |= QIB_S_WAIT_DMA;
+ goto bail;
+ }
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ goto done;
+ }
+
+ ohdr = &qp->s_hdr.u.oth;
+ if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ ohdr = &qp->s_hdr.u.l.oth;
+
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+ bth0 = 0;
+
+ /* Get the next send request. */
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ qp->s_wqe = NULL;
+ switch (qp->s_state) {
+ default:
+ if (!(ib_qib_state_ops[qp->state] &
+ QIB_PROCESS_NEXT_SEND_OK))
+ goto bail;
+ /* Check if send work queue is empty. */
+ if (qp->s_cur == qp->s_head)
+ goto bail;
+ /*
+ * Start a new request.
+ */
+ wqe->psn = qp->s_next_psn;
+ qp->s_psn = qp->s_next_psn;
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+ len = wqe->length;
+ qp->s_len = len;
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ if (len > pmtu) {
+ qp->s_state = OP(SEND_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_ONLY);
+ else {
+ qp->s_state =
+ OP(SEND_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ qp->s_wqe = wqe;
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(len);
+ hwords += sizeof(struct ib_reth) / 4;
+ if (len > pmtu) {
+ qp->s_state = OP(RDMA_WRITE_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_ONLY);
+ else {
+ qp->s_state =
+ OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after the RETH */
+ ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ qp->s_wqe = wqe;
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ default:
+ goto bail;
+ }
+ break;
+
+ case OP(SEND_FIRST):
+ qp->s_state = OP(SEND_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_LAST);
+ else {
+ qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ qp->s_wqe = wqe;
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_WRITE_FIRST):
+ qp->s_state = OP(RDMA_WRITE_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_MIDDLE):
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_LAST);
+ else {
+ qp->s_state =
+ OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ qp->s_wqe = wqe;
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+ }
+ qp->s_len -= len;
+ qp->s_hdrwords = hwords;
+ qp->s_cur_sge = &qp->s_sge;
+ qp->s_cur_size = len;
+ qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
+ qp->s_next_psn++ & QIB_PSN_MASK);
+done:
+ ret = 1;
+ goto unlock;
+
+bail:
+ qp->s_flags &= ~QIB_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
+
+/**
+ * qib_uc_rcv - handle an incoming UC packet
+ * @ibp: the port the packet came in on
+ * @hdr: the header of the packet
+ * @has_grh: true if the packet has a GRH
+ * @data: the packet data
+ * @tlen: the length of the packet
+ * @qp: the QP for this packet.
+ *
+ * This is called from qib_qp_rcv() to process an incoming UC packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+{
+ struct qib_other_headers *ohdr;
+ unsigned long flags;
+ u32 opcode;
+ u32 hdrsize;
+ u32 psn;
+ u32 pad;
+ struct ib_wc wc;
+ u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ struct ib_reth *reth;
+ int ret;
+
+ /* Check for GRH */
+ if (!has_grh) {
+ ohdr = &hdr->u.oth;
+ hdrsize = 8 + 12; /* LRH + BTH */
+ } else {
+ ohdr = &hdr->u.l.oth;
+ hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
+ }
+
+ opcode = be32_to_cpu(ohdr->bth[0]);
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
+ goto sunlock;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ psn = be32_to_cpu(ohdr->bth[2]);
+ opcode >>= 24;
+ memset(&wc, 0, sizeof wc);
+
+ /* Prevent simultaneous processing after APM on different CPUs */
+ spin_lock(&qp->r_lock);
+
+ /* Compare the PSN against the expected PSN. */
+ if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
+ /*
+ * Handle a sequence error.
+ * Silently drop any current message.
+ */
+ qp->r_psn = psn;
+inv:
+ if (qp->r_state == OP(SEND_FIRST) ||
+ qp->r_state == OP(SEND_MIDDLE)) {
+ set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+ qp->r_sge.num_sge = 0;
+ } else
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+ qp->r_state = OP(SEND_LAST);
+ switch (opcode) {
+ case OP(SEND_FIRST):
+ case OP(SEND_ONLY):
+ case OP(SEND_ONLY_WITH_IMMEDIATE):
+ goto send_first;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_ONLY):
+ case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+ goto rdma_first;
+
+ default:
+ goto drop;
+ }
+ }
+
+ /* Check for opcode sequence errors. */
+ switch (qp->r_state) {
+ case OP(SEND_FIRST):
+ case OP(SEND_MIDDLE):
+ if (opcode == OP(SEND_MIDDLE) ||
+ opcode == OP(SEND_LAST) ||
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE))
+ break;
+ goto inv;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_MIDDLE):
+ if (opcode == OP(RDMA_WRITE_MIDDLE) ||
+ opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+ break;
+ goto inv;
+
+ default:
+ if (opcode == OP(SEND_FIRST) ||
+ opcode == OP(SEND_ONLY) ||
+ opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
+ opcode == OP(RDMA_WRITE_FIRST) ||
+ opcode == OP(RDMA_WRITE_ONLY) ||
+ opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+ break;
+ goto inv;
+ }
+
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
+ qp->r_flags |= QIB_R_COMM_EST;
+ if (qp->ibqp.event_handler) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_COMM_EST;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ }
+
+ /* OK, process the packet. */
+ switch (opcode) {
+ case OP(SEND_FIRST):
+ case OP(SEND_ONLY):
+ case OP(SEND_ONLY_WITH_IMMEDIATE):
+send_first:
+ if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+ qp->r_sge = qp->s_rdma_read_sge;
+ else {
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto drop;
+ /*
+ * qp->s_rdma_read_sge will be the owner
+ * of the mr references.
+ */
+ qp->s_rdma_read_sge = qp->r_sge;
+ }
+ qp->r_rcv_len = 0;
+ if (opcode == OP(SEND_ONLY))
+ goto send_last;
+ else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
+ goto send_last_imm;
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ /* Check for invalid length PMTU or posted rwqe len. */
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto rewind;
+ qp->r_rcv_len += pmtu;
+ if (unlikely(qp->r_rcv_len > qp->r_len))
+ goto rewind;
+ qib_copy_sge(&qp->r_sge, data, pmtu, 0);
+ break;
+
+ case OP(SEND_LAST_WITH_IMMEDIATE):
+send_last_imm:
+ wc.ex.imm_data = ohdr->u.imm_data;
+ hdrsize += 4;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ /* FALLTHROUGH */
+ case OP(SEND_LAST):
+send_last:
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* XXX LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto rewind;
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ wc.byte_len = tlen + qp->r_rcv_len;
+ if (unlikely(wc.byte_len > qp->r_len))
+ goto rewind;
+ wc.opcode = IB_WC_RECV;
+last_imm:
+ qib_copy_sge(&qp->r_sge, data, tlen, 0);
+ while (qp->s_rdma_read_sge.num_sge) {
+ atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
+ if (--qp->s_rdma_read_sge.num_sge)
+ qp->s_rdma_read_sge.sge =
+ *qp->s_rdma_read_sge.sg_list++;
+ }
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = qp->remote_qpn;
+ wc.slid = qp->remote_ah_attr.dlid;
+ wc.sl = qp->remote_ah_attr.sl;
+ /* Signal completion event if the solicited bit is set. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ (ohdr->bth[0] &
+ cpu_to_be32(IB_BTH_SOLICITED)) != 0);
+ break;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_ONLY):
+ case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
+rdma_first:
+ if (unlikely(!(qp->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE))) {
+ goto drop;
+ }
+ reth = &ohdr->u.rc.reth;
+ hdrsize += sizeof(*reth);
+ qp->r_len = be32_to_cpu(reth->length);
+ qp->r_rcv_len = 0;
+ qp->r_sge.sg_list = NULL;
+ if (qp->r_len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ /* Check rkey */
+ ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
+ vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
+ if (unlikely(!ok))
+ goto drop;
+ qp->r_sge.num_sge = 1;
+ } else {
+ qp->r_sge.num_sge = 0;
+ qp->r_sge.sge.mr = NULL;
+ qp->r_sge.sge.vaddr = NULL;
+ qp->r_sge.sge.length = 0;
+ qp->r_sge.sge.sge_length = 0;
+ }
+ if (opcode == OP(RDMA_WRITE_ONLY))
+ goto rdma_last;
+ else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+ goto rdma_last_imm;
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_MIDDLE):
+ /* Check for invalid length PMTU or posted rwqe len. */
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto drop;
+ qp->r_rcv_len += pmtu;
+ if (unlikely(qp->r_rcv_len > qp->r_len))
+ goto drop;
+ qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+ break;
+
+ case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
+rdma_last_imm:
+ wc.ex.imm_data = ohdr->u.imm_data;
+ hdrsize += 4;
+ wc.wc_flags = IB_WC_WITH_IMM;
+
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* XXX LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto drop;
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
+ goto drop;
+ if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+ while (qp->s_rdma_read_sge.num_sge) {
+ atomic_dec(&qp->s_rdma_read_sge.sge.mr->
+ refcount);
+ if (--qp->s_rdma_read_sge.num_sge)
+ qp->s_rdma_read_sge.sge =
+ *qp->s_rdma_read_sge.sg_list++;
+ }
+ else {
+ ret = qib_get_rwqe(qp, 1);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto drop;
+ }
+ wc.byte_len = qp->r_len;
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ goto last_imm;
+
+ case OP(RDMA_WRITE_LAST):
+rdma_last:
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* XXX LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto drop;
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
+ goto drop;
+ qib_copy_sge(&qp->r_sge, data, tlen, 1);
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+ break;
+
+ default:
+ /* Drop packet for unknown opcodes. */
+ goto drop;
+ }
+ qp->r_psn++;
+ qp->r_state = opcode;
+ spin_unlock(&qp->r_lock);
+ return;
+
+rewind:
+ set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+ qp->r_sge.num_sge = 0;
+drop:
+ ibp->n_pkt_drops++;
+ spin_unlock(&qp->r_lock);
+ return;
+
+op_err:
+ qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ spin_unlock(&qp->r_lock);
+ return;
+
+sunlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
new file mode 100644
index 000000000000..c838cda73347
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -0,0 +1,607 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_smi.h>
+
+#include "qib.h"
+#include "qib_mad.h"
+
+/**
+ * qib_ud_loopback - handle send on loopback QPs
+ * @sqp: the sending QP
+ * @swqe: the send work request
+ *
+ * This is called from qib_make_ud_req() to forward a WQE addressed
+ * to the same HCA.
+ * Note that the receive interrupt handler may be calling qib_ud_rcv()
+ * while this is being called.
+ */
+static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
+{
+ struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
+ struct qib_pportdata *ppd;
+ struct qib_qp *qp;
+ struct ib_ah_attr *ah_attr;
+ unsigned long flags;
+ struct qib_sge_state ssge;
+ struct qib_sge *sge;
+ struct ib_wc wc;
+ u32 length;
+
+ qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
+ if (!qp) {
+ ibp->n_pkt_drops++;
+ return;
+ }
+ if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
+ !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
+ ibp->n_pkt_drops++;
+ goto drop;
+ }
+
+ ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
+ ppd = ppd_from_ibp(ibp);
+
+ if (qp->ibqp.qp_num > 1) {
+ u16 pkey1;
+ u16 pkey2;
+ u16 lid;
+
+ pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
+ pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
+ if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
+ lid = ppd->lid | (ah_attr->src_path_bits &
+ ((1 << ppd->lmc) - 1));
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
+ ah_attr->sl,
+ sqp->ibqp.qp_num, qp->ibqp.qp_num,
+ cpu_to_be16(lid),
+ cpu_to_be16(ah_attr->dlid));
+ goto drop;
+ }
+ }
+
+ /*
+ * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
+ * Qkeys with the high order bit set mean use the
+ * qkey from the QP context instead of the WR (see 10.2.5).
+ */
+ if (qp->ibqp.qp_num) {
+ u32 qkey;
+
+ qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ?
+ sqp->qkey : swqe->wr.wr.ud.remote_qkey;
+ if (unlikely(qkey != qp->qkey)) {
+ u16 lid;
+
+ lid = ppd->lid | (ah_attr->src_path_bits &
+ ((1 << ppd->lmc) - 1));
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
+ ah_attr->sl,
+ sqp->ibqp.qp_num, qp->ibqp.qp_num,
+ cpu_to_be16(lid),
+ cpu_to_be16(ah_attr->dlid));
+ goto drop;
+ }
+ }
+
+ /*
+ * A GRH is expected to precede the data even if not
+ * present on the wire.
+ */
+ length = swqe->length;
+ memset(&wc, 0, sizeof wc);
+ wc.byte_len = length + sizeof(struct ib_grh);
+
+ if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = swqe->wr.ex.imm_data;
+ }
+
+ spin_lock_irqsave(&qp->r_lock, flags);
+
+ /*
+ * Get the next work request entry to find where to put the data.
+ */
+ if (qp->r_flags & QIB_R_REUSE_SGE)
+ qp->r_flags &= ~QIB_R_REUSE_SGE;
+ else {
+ int ret;
+
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0) {
+ qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ goto bail_unlock;
+ }
+ if (!ret) {
+ if (qp->ibqp.qp_num == 0)
+ ibp->n_vl15_dropped++;
+ goto bail_unlock;
+ }
+ }
+ /* Silently drop packets which are too big. */
+ if (unlikely(wc.byte_len > qp->r_len)) {
+ qp->r_flags |= QIB_R_REUSE_SGE;
+ ibp->n_pkt_drops++;
+ goto bail_unlock;
+ }
+
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ qib_copy_sge(&qp->r_sge, &ah_attr->grh,
+ sizeof(struct ib_grh), 1);
+ wc.wc_flags |= IB_WC_GRH;
+ } else
+ qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
+ ssge.sg_list = swqe->sg_list + 1;
+ ssge.sge = *swqe->sg_list;
+ ssge.num_sge = swqe->wr.num_sge;
+ sge = &ssge.sge;
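+ /* copy the payload, advancing through the sender's SGE list (and MR segments) as needed */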
+ while (length) {
+ u32 len = sge->length;
+
+ if (len > length)
+ len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (--ssge.num_sge)
+ *sge = *ssge.sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ length -= len;
+ }
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+ if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ goto bail_unlock;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = sqp->ibqp.qp_num;
+ wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
+ swqe->wr.wr.ud.pkey_index : 0;
+ wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
+ wc.sl = ah_attr->sl;
+ wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
+ wc.port_num = qp->port_num;
+ /* Signal completion event if the solicited bit is set. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ swqe->wr.send_flags & IB_SEND_SOLICITED);
+ ibp->n_loop_pkts++;
+bail_unlock:
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+drop:
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+}
+
+/**
+ * qib_make_ud_req - construct a UD request packet
+ * @qp: the QP
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ */
+int qib_make_ud_req(struct qib_qp *qp)
+{
+ struct qib_other_headers *ohdr;
+ struct ib_ah_attr *ah_attr;
+ struct qib_pportdata *ppd;
+ struct qib_ibport *ibp;
+ struct qib_swqe *wqe;
+ unsigned long flags;
+ u32 nwords;
+ u32 extra_bytes;
+ u32 bth0;
+ u16 lrh0;
+ u16 lid;
+ int ret = 0;
+ int next_cur;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+ goto bail;
+ /* We are in the error state, flush the work request. */
+ if (qp->s_last == qp->s_head)
+ goto bail;
+ /* If DMAs are in progress, we can't flush immediately. */
+ if (atomic_read(&qp->s_dma_busy)) {
+ qp->s_flags |= QIB_S_WAIT_DMA;
+ goto bail;
+ }
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ goto done;
+ }
+
+ if (qp->s_cur == qp->s_head)
+ goto bail;
+
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ next_cur = qp->s_cur + 1;
+ if (next_cur >= qp->s_size)
+ next_cur = 0;
+
+ /* Construct the header. */
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ppd = ppd_from_ibp(ibp);
+ ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
+ if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
+ if (ah_attr->dlid != QIB_PERMISSIVE_LID)
+ ibp->n_multicast_xmit++;
+ else
+ ibp->n_unicast_xmit++;
+ } else {
+ ibp->n_unicast_xmit++;
+ lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
+ if (unlikely(lid == ppd->lid)) {
+ /*
+ * If DMAs are in progress, we can't generate
+ * a completion for the loopback packet since
+ * it would be out of order.
+ * XXX Instead of waiting, we could queue a
+ * zero length descriptor so we get a callback.
+ */
+ if (atomic_read(&qp->s_dma_busy)) {
+ qp->s_flags |= QIB_S_WAIT_DMA;
+ goto bail;
+ }
+ qp->s_cur = next_cur;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ qib_ud_loopback(qp, wqe);
+ spin_lock_irqsave(&qp->s_lock, flags);
+ qib_send_complete(qp, wqe, IB_WC_SUCCESS);
+ goto done;
+ }
+ }
+
+ qp->s_cur = next_cur;
+ extra_bytes = -wqe->length & 3;
+ nwords = (wqe->length + extra_bytes) >> 2;
+
+ /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
+ qp->s_hdrwords = 7;
+ qp->s_cur_size = wqe->length;
+ qp->s_cur_sge = &qp->s_sge;
+ qp->s_srate = ah_attr->static_rate;
+ qp->s_wqe = wqe;
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ /* Header size in 32-bit words. */
+ qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
+ &ah_attr->grh,
+ qp->s_hdrwords, nwords);
+ lrh0 = QIB_LRH_GRH;
+ ohdr = &qp->s_hdr.u.l.oth;
+ /*
+ * Don't worry about sending to locally attached multicast
+ * QPs. The spec leaves what happens in that case unspecified.
+ */
+ } else {
+ /* Header size in 32-bit words. */
+ lrh0 = QIB_LRH_BTH;
+ ohdr = &qp->s_hdr.u.oth;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+ qp->s_hdrwords++;
+ ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
+ bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
+ } else
+ bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
+ lrh0 |= ah_attr->sl << 4;
+ if (qp->ibqp.qp_type == IB_QPT_SMI)
+ lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
+ else
+ lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
+ qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
+ qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
+ qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+ lid = ppd->lid;
+ if (lid) {
+ lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
+ qp->s_hdr.lrh[3] = cpu_to_be16(lid);
+ } else
+ qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ bth0 |= extra_bytes << 20;
+ bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
+ qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
+ wqe->wr.wr.ud.pkey_index : qp->s_pkey_index);
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ /*
+ * Use the multicast QP if the destination LID is a multicast LID.
+ */
+ ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
+ ah_attr->dlid != QIB_PERMISSIVE_LID ?
+ cpu_to_be32(QIB_MULTICAST_QPN) :
+ cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
+ ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK);
+ /*
+ * Qkeys with the high order bit set mean use the
+ * qkey from the QP context instead of the WR (see 10.2.5).
+ */
+ ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
+ qp->qkey : wqe->wr.wr.ud.remote_qkey);
+ ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
+
+done:
+ ret = 1;
+ goto unlock;
+
+bail:
+ qp->s_flags &= ~QIB_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
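+
+/*
+ * Worked example of the padding math in qib_make_ud_req() above
+ * (illustrative only, not part of the original code): for a 13-byte
+ * UD payload, extra_bytes = -13 & 3 = 3 pad bytes and
+ * nwords = (13 + 3) >> 2 = 4 payload dwords, so with the 7-dword
+ * LRH+BTH+DETH header and one dword of CRC (SIZE_OF_CRC), lrh[2]
+ * carries 7 + 4 + 1 = 12 dwords.
+ */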
+
+static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
+{
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_devdata *dd = ppd->dd;
+ unsigned ctxt = ppd->hw_pidx;
+ unsigned i;
+
+ pkey &= 0x7fff; /* remove limited/full membership bit */
+
+ for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
+ if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
+ return i;
+
+ /*
+ * Should not get here; this means the hardware failed to validate pkeys.
+ * Punt and return index 0.
+ */
+ return 0;
+}
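+
+/*
+ * Example of the membership-bit masking in qib_lookup_pkey() above
+ * (illustrative only): a full-membership key 0xffff and its
+ * limited-membership twin 0x7fff both reduce to 0x7fff after the
+ * mask, so either form of the default pkey resolves to the same
+ * table index.
+ */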
+
+/**
+ * qib_ud_rcv - receive an incoming UD packet
+ * @ibp: the port the packet came in on
+ * @hdr: the packet header
+ * @has_grh: true if the packet has a GRH
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP the packet came on
+ *
+ * This is called from qib_qp_rcv() to process an incoming UD packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+{
+ struct qib_other_headers *ohdr;
+ int opcode;
+ u32 hdrsize;
+ u32 pad;
+ struct ib_wc wc;
+ u32 qkey;
+ u32 src_qp;
+ u16 dlid;
+
+ /* Check for GRH */
+ if (!has_grh) {
+ ohdr = &hdr->u.oth;
+ hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
+ } else {
+ ohdr = &hdr->u.l.oth;
+ hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
+ }
+ qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
+ src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
+
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ if (unlikely(tlen < (hdrsize + pad + 4))) {
+ /* Drop incomplete packets. */
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+ tlen -= hdrsize + pad + 4;
+
+ /*
+ * Check that the permissive LID is only used on QP0
+ * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
+ */
+ if (qp->ibqp.qp_num) {
+ if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
+ hdr->lrh[3] == IB_LID_PERMISSIVE)) {
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+ if (qp->ibqp.qp_num > 1) {
+ u16 pkey1, pkey2;
+
+ pkey1 = be32_to_cpu(ohdr->bth[0]);
+ pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
+ if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+ pkey1,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) &
+ 0xF,
+ src_qp, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
+ goto bail;
+ }
+ }
+ if (unlikely(qkey != qp->qkey)) {
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+ src_qp, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
+ goto bail;
+ }
+ /* Drop invalid MAD packets (see 13.5.3.1). */
+ if (unlikely(qp->ibqp.qp_num == 1 &&
+ (tlen != 256 ||
+ (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) {
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+ } else {
+ struct ib_smp *smp;
+
+ /* Drop invalid MAD packets (see 13.5.3.1). */
+ if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) {
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+ smp = (struct ib_smp *) data;
+ if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
+ hdr->lrh[3] == IB_LID_PERMISSIVE) &&
+ smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+ }
+
+ /*
+ * The opcode is in the low byte when it's in network order
+ * (top byte when in host order).
+ */
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+ if (qp->ibqp.qp_num > 1 &&
+ opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
+ wc.ex.imm_data = ohdr->u.ud.imm_data;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ hdrsize += sizeof(u32);
+ } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
+ wc.ex.imm_data = 0;
+ wc.wc_flags = 0;
+ } else {
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+
+ /*
+ * A GRH is expected to precede the data even if not
+ * present on the wire.
+ */
+ wc.byte_len = tlen + sizeof(struct ib_grh);
+
+ /*
+ * We need to serialize getting a receive work queue entry and
+ * generating a completion for it against QPs sending to this QP
+ * locally.
+ */
+ spin_lock(&qp->r_lock);
+
+ /*
+ * Get the next work request entry to find where to put the data.
+ */
+ if (qp->r_flags & QIB_R_REUSE_SGE)
+ qp->r_flags &= ~QIB_R_REUSE_SGE;
+ else {
+ int ret;
+
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0) {
+ qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ goto bail_unlock;
+ }
+ if (!ret) {
+ if (qp->ibqp.qp_num == 0)
+ ibp->n_vl15_dropped++;
+ goto bail_unlock;
+ }
+ }
+ /* Silently drop packets which are too big. */
+ if (unlikely(wc.byte_len > qp->r_len)) {
+ qp->r_flags |= QIB_R_REUSE_SGE;
+ ibp->n_pkt_drops++;
+ goto bail_unlock;
+ }
+ if (has_grh) {
+ qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
+ sizeof(struct ib_grh), 1);
+ wc.wc_flags |= IB_WC_GRH;
+ } else
+ qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
+ qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+ if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ goto bail_unlock;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = IB_WC_RECV;
+ wc.vendor_err = 0;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = src_qp;
+ wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
+ qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
+ wc.slid = be16_to_cpu(hdr->lrh[3]);
+ wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
+ dlid = be16_to_cpu(hdr->lrh[1]);
+ /*
+ * Save the LMC lower bits if the destination LID is a unicast LID.
+ */
+ wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 :
+ dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
+ wc.port_num = qp->port_num;
+ /* Signal completion event if the solicited bit is set. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ (ohdr->bth[0] &
+ cpu_to_be32(IB_BTH_SOLICITED)) != 0);
+bail_unlock:
+ spin_unlock(&qp->r_lock);
+bail:;
+}
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
new file mode 100644
index 000000000000..d7a26c1d4f37
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mm.h>
+#include <linux/device.h>
+
+#include "qib.h"
+
+static void __qib_release_user_pages(struct page **p, size_t num_pages,
+ int dirty)
+{
+ size_t i;
+
+ for (i = 0; i < num_pages; i++) {
+ if (dirty)
+ set_page_dirty_lock(p[i]);
+ put_page(p[i]);
+ }
+}
+
+/*
+ * Call with current->mm->mmap_sem held.
+ */
+static int __get_user_pages(unsigned long start_page, size_t num_pages,
+ struct page **p, struct vm_area_struct **vma)
+{
+ unsigned long lock_limit;
+ size_t got;
+ int ret;
+
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ for (got = 0; got < num_pages; got += ret) {
+ ret = get_user_pages(current, current->mm,
+ start_page + got * PAGE_SIZE,
+ num_pages - got, 1, 1,
+ p + got, vma);
+ if (ret < 0)
+ goto bail_release;
+ }
+
+ current->mm->locked_vm += num_pages;
+
+ ret = 0;
+ goto bail;
+
+bail_release:
+ __qib_release_user_pages(p, got, 0);
+bail:
+ return ret;
+}
+
+/**
+ * qib_map_page - a safety wrapper around pci_map_page()
+ *
+ * A dma_addr of all 0's is interpreted by the chip as "disabled".
+ * Unfortunately, it can also be a valid dma_addr returned on some
+ * architectures.
+ *
+ * The powerpc iommu assigns dma_addrs in ascending order, so we don't
+ * have to bother with retries or mapping a dummy page to ensure we
+ * don't just get the same mapping again.
+ *
+ * I'm sure we won't be so lucky with other IOMMUs, so FIXME.
+ */
+dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
+ unsigned long offset, size_t size, int direction)
+{
+ dma_addr_t phys;
+
+ phys = pci_map_page(hwdev, page, offset, size, direction);
+
+ if (phys == 0) {
+ pci_unmap_page(hwdev, phys, size, direction);
+ phys = pci_map_page(hwdev, page, offset, size, direction);
+ /*
+ * FIXME: If we get 0 again, we should keep this page,
+ * map another, then free the 0 page.
+ */
+ }
+
+ return phys;
+}
+
+/**
+ * qib_get_user_pages - lock user pages into memory
+ * @start_page: the start page
+ * @num_pages: the number of pages
+ * @p: the output page structures
+ *
+ * This function takes a given start page (page aligned user virtual
+ * address) and pins it and the following specified number of pages. For
+ * now, num_pages is always 1, but that will probably change at some point
+ * (because caller is doing expected sends on a single virtually contiguous
+ * buffer, so we can do all pages at once).
+ */
+int qib_get_user_pages(unsigned long start_page, size_t num_pages,
+ struct page **p)
+{
+ int ret;
+
+ down_write(&current->mm->mmap_sem);
+
+ ret = __get_user_pages(start_page, num_pages, p, NULL);
+
+ up_write(&current->mm->mmap_sem);
+
+ return ret;
+}
+
+void qib_release_user_pages(struct page **p, size_t num_pages)
+{
+ if (current->mm) /* during close after signal, mm can be NULL */
+ down_write(&current->mm->mmap_sem);
+
+ __qib_release_user_pages(p, num_pages, 1);
+
+ if (current->mm) {
+ current->mm->locked_vm -= num_pages;
+ up_write(&current->mm->mmap_sem);
+ }
+}
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
new file mode 100644
index 000000000000..4c19e06b5e85
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -0,0 +1,897 @@
+/*
+ * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+#include "qib.h"
+#include "qib_user_sdma.h"
+
+/* minimum size of header */
+#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
+/* expected size of headers (for dma_pool) */
+#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
+/* attempt to drain the queue for 5secs */
+#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
+
+struct qib_user_sdma_pkt {
+ u8 naddr; /* dimension of addr (1..3) ... */
+ u32 counter; /* sdma pkts queued counter for this entry */
+ u64 added; /* global descq number of entries */
+
+ struct {
+ u32 offset; /* offset for kvaddr, addr */
+ u32 length; /* length in page */
+ u8 put_page; /* should we put_page? */
+ u8 dma_mapped; /* is page dma_mapped? */
+ struct page *page; /* may be NULL (coherent mem) */
+ void *kvaddr; /* FIXME: only for pio hack */
+ dma_addr_t addr;
+ } addr[4]; /* max pages, any more and we coalesce */
+ struct list_head list; /* list element */
+};
+
+struct qib_user_sdma_queue {
+ /*
+ * Packets sent to the DMA engine are queued on this
+ * list head; each element of the list is a
+ * struct qib_user_sdma_pkt...
+ */
+ struct list_head sent;
+
+ /* headers with expected length are allocated from here... */
+ char header_cache_name[64];
+ struct dma_pool *header_cache;
+
+ /* packets are allocated from the slab cache... */
+ char pkt_slab_name[64];
+ struct kmem_cache *pkt_slab;
+
+ /* as packets go on the queued queue, they are counted... */
+ u32 counter;
+ u32 sent_counter;
+
+ /* dma page table */
+ struct rb_root dma_pages_root;
+
+ /* protect everything above... */
+ struct mutex lock;
+};
+
+struct qib_user_sdma_queue *
+qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
+{
+ struct qib_user_sdma_queue *pq =
+ kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
+
+ if (!pq)
+ goto done;
+
+ pq->counter = 0;
+ pq->sent_counter = 0;
+ INIT_LIST_HEAD(&pq->sent);
+
+ mutex_init(&pq->lock);
+
+ snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
+ "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
+ pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
+ sizeof(struct qib_user_sdma_pkt),
+ 0, 0, NULL);
+
+ if (!pq->pkt_slab)
+ goto err_kfree;
+
+ snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
+ "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
+ pq->header_cache = dma_pool_create(pq->header_cache_name,
+ dev,
+ QIB_USER_SDMA_EXP_HEADER_LENGTH,
+ 4, 0);
+ if (!pq->header_cache)
+ goto err_slab;
+
+ pq->dma_pages_root = RB_ROOT;
+
+ goto done;
+
+err_slab:
+ kmem_cache_destroy(pq->pkt_slab);
+err_kfree:
+ kfree(pq);
+ pq = NULL;
+
+done:
+ return pq;
+}
+
+static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
+ int i, size_t offset, size_t len,
+ int put_page, int dma_mapped,
+ struct page *page,
+ void *kvaddr, dma_addr_t dma_addr)
+{
+ pkt->addr[i].offset = offset;
+ pkt->addr[i].length = len;
+ pkt->addr[i].put_page = put_page;
+ pkt->addr[i].dma_mapped = dma_mapped;
+ pkt->addr[i].page = page;
+ pkt->addr[i].kvaddr = kvaddr;
+ pkt->addr[i].addr = dma_addr;
+}
+
+static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt,
+ u32 counter, size_t offset,
+ size_t len, int dma_mapped,
+ struct page *page,
+ void *kvaddr, dma_addr_t dma_addr)
+{
+ pkt->naddr = 1;
+ pkt->counter = counter;
+ qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
+ kvaddr, dma_addr);
+}
+
+/* we have too many pages in the iovec, so coalesce them into a single page */
+static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
+ struct qib_user_sdma_pkt *pkt,
+ const struct iovec *iov,
+ unsigned long niov)
+{
+ int ret = 0;
+ struct page *page = alloc_page(GFP_KERNEL);
+ void *mpage_save;
+ char *mpage;
+ int i;
+ int len = 0;
+ dma_addr_t dma_addr;
+
+ if (!page) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ mpage = kmap(page);
+ mpage_save = mpage;
+ for (i = 0; i < niov; i++) {
+ int cfur;
+
+ cfur = copy_from_user(mpage,
+ iov[i].iov_base, iov[i].iov_len);
+ if (cfur) {
+ ret = -EFAULT;
+ goto free_unmap;
+ }
+
+ mpage += iov[i].iov_len;
+ len += iov[i].iov_len;
+ }
+
+ dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+ ret = -ENOMEM;
+ goto free_unmap;
+ }
+
+ qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
+ dma_addr);
+ pkt->naddr = 2;
+
+ goto done;
+
+free_unmap:
+ kunmap(page);
+ __free_page(page);
+done:
+ return ret;
+}
+
+/*
+ * How many pages in this iovec element?
+ */
+static int qib_user_sdma_num_pages(const struct iovec *iov)
+{
+ const unsigned long addr = (unsigned long) iov->iov_base;
+ const unsigned long len = iov->iov_len;
+ const unsigned long spage = addr & PAGE_MASK;
+ const unsigned long epage = (addr + len - 1) & PAGE_MASK;
+
+ return 1 + ((epage - spage) >> PAGE_SHIFT);
+}
+
+/*
+ * Truncate length to page boundary.
+ */
+static int qib_user_sdma_page_length(unsigned long addr, unsigned long len)
+{
+ const unsigned long offset = addr & ~PAGE_MASK;
+
+ return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
+}
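+
+/*
+ * Worked example for the two helpers above (illustrative only, and
+ * assuming a 4 KiB page size): an iovec element at user address
+ * 0x1ff0 with length 48 straddles a page boundary, so
+ * qib_user_sdma_num_pages() reports 2 pages, and
+ * qib_user_sdma_page_length(0x1ff0, 48) trims the first fragment to
+ * 4096 - 0xff0 = 16 bytes, leaving 32 bytes for the second page.
+ */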
+
+static void qib_user_sdma_free_pkt_frag(struct device *dev,
+ struct qib_user_sdma_queue *pq,
+ struct qib_user_sdma_pkt *pkt,
+ int frag)
+{
+ const int i = frag;
+
+ if (pkt->addr[i].page) {
+ if (pkt->addr[i].dma_mapped)
+ dma_unmap_page(dev,
+ pkt->addr[i].addr,
+ pkt->addr[i].length,
+ DMA_TO_DEVICE);
+
+ if (pkt->addr[i].kvaddr)
+ kunmap(pkt->addr[i].page);
+
+ if (pkt->addr[i].put_page)
+ put_page(pkt->addr[i].page);
+ else
+ __free_page(pkt->addr[i].page);
+ } else if (pkt->addr[i].kvaddr)
+ /* free coherent mem from cache... */
+ dma_pool_free(pq->header_cache,
+ pkt->addr[i].kvaddr, pkt->addr[i].addr);
+}
+
+/* return number of pages pinned... */
+static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
+ struct qib_user_sdma_pkt *pkt,
+ unsigned long addr, int tlen, int npages)
+{
+ struct page *pages[2];
+ int j;
+ int ret;
+
+ ret = get_user_pages(current, current->mm, addr,
+ npages, 0, 1, pages, NULL);
+
+ if (ret != npages) {
+ int i;
+
+ for (i = 0; i < ret; i++)
+ put_page(pages[i]);
+
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ for (j = 0; j < npages; j++) {
+ /* map the pages... */
+ const int flen = qib_user_sdma_page_length(addr, tlen);
+ dma_addr_t dma_addr =
+ dma_map_page(&dd->pcidev->dev,
+ pages[j], 0, flen, DMA_TO_DEVICE);
+ unsigned long fofs = addr & ~PAGE_MASK;
+
+ if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
+ pages[j], kmap(pages[j]), dma_addr);
+
+ pkt->naddr++;
+ addr += flen;
+ tlen -= flen;
+ }
+
+done:
+ return ret;
+}
+
+static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
+ struct qib_user_sdma_queue *pq,
+ struct qib_user_sdma_pkt *pkt,
+ const struct iovec *iov,
+ unsigned long niov)
+{
+ int ret = 0;
+ unsigned long idx;
+
+ for (idx = 0; idx < niov; idx++) {
+ const int npages = qib_user_sdma_num_pages(iov + idx);
+ const unsigned long addr = (unsigned long) iov[idx].iov_base;
+
+ ret = qib_user_sdma_pin_pages(dd, pkt, addr,
+ iov[idx].iov_len, npages);
+ if (ret < 0)
+ goto free_pkt;
+ }
+
+ goto done;
+
+free_pkt:
+ for (idx = 0; idx < pkt->naddr; idx++)
+ qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
+
+done:
+ return ret;
+}
+
+static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
+ struct qib_user_sdma_queue *pq,
+ struct qib_user_sdma_pkt *pkt,
+ const struct iovec *iov,
+ unsigned long niov, int npages)
+{
+ int ret = 0;
+
+ if (npages >= ARRAY_SIZE(pkt->addr))
+ ret = qib_user_sdma_coalesce(dd, pkt, iov, niov);
+ else
+ ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
+
+ return ret;
+}
+
+/* free a packet list -- each packet's fragments are released first */
+static void qib_user_sdma_free_pkt_list(struct device *dev,
+ struct qib_user_sdma_queue *pq,
+ struct list_head *list)
+{
+ struct qib_user_sdma_pkt *pkt, *pkt_next;
+
+ list_for_each_entry_safe(pkt, pkt_next, list, list) {
+ int i;
+
+ for (i = 0; i < pkt->naddr; i++)
+ qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
+
+ kmem_cache_free(pq->pkt_slab, pkt);
+ }
+}
+
+/*
+ * copy headers, coalesce etc -- pq->lock must be held
+ *
+ * We queue all the packets on the list, returning the number of
+ * iovec elements consumed. The list must be empty initially; if
+ * there is an error we clean it out...
+ */
+static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
+ struct qib_user_sdma_queue *pq,
+ struct list_head *list,
+ const struct iovec *iov,
+ unsigned long niov,
+ int maxpkts)
+{
+ unsigned long idx = 0;
+ int ret = 0;
+ int npkts = 0;
+ struct page *page = NULL;
+ __le32 *pbc;
+ dma_addr_t dma_addr;
+ struct qib_user_sdma_pkt *pkt = NULL;
+ size_t len;
+ size_t nw;
+ u32 counter = pq->counter;
+ int dma_mapped = 0;
+
+ while (idx < niov && npkts < maxpkts) {
+ const unsigned long addr = (unsigned long) iov[idx].iov_base;
+ const unsigned long idx_save = idx;
+ unsigned pktnw;
+ unsigned pktnwc;
+ int nfrags = 0;
+ int npages = 0;
+ int cfur;
+
+ dma_mapped = 0;
+ len = iov[idx].iov_len;
+ nw = len >> 2;
+ page = NULL;
+
+ pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
+ if (!pkt) {
+ ret = -ENOMEM;
+ goto free_list;
+ }
+
+ if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
+ len > PAGE_SIZE || len & 3 || addr & 3) {
+ ret = -EINVAL;
+ goto free_pkt;
+ }
+
+ if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
+ pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
+ &dma_addr);
+ else
+ pbc = NULL;
+
+ if (!pbc) {
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ ret = -ENOMEM;
+ goto free_pkt;
+ }
+ pbc = kmap(page);
+ }
+
+ cfur = copy_from_user(pbc, iov[idx].iov_base, len);
+ if (cfur) {
+ ret = -EFAULT;
+ goto free_pbc;
+ }
+
+ /*
+ * This assignment is a bit strange: the PBC counts the
+ * number of 32-bit words in the full packet _except_ the
+ * first word of the PBC itself...
+ */
+ pktnwc = nw - 1;
+
+ /*
+ * The pktnw computation yields the number of 32-bit words
+ * that the caller has indicated in the PBC. Note that
+ * this is one less than the total number of words that
+ * go to the send DMA engine, as the first 32-bit word
+ * of the PBC itself is not counted. Armed with this count,
+ * we can verify that the packet is consistent with the
+ * iovec lengths.
+ */
+ pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK;
+ if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
+
+ idx++;
+ while (pktnwc < pktnw && idx < niov) {
+ const size_t slen = iov[idx].iov_len;
+ const unsigned long faddr =
+ (unsigned long) iov[idx].iov_base;
+
+ if (slen & 3 || faddr & 3 || !slen ||
+ slen > PAGE_SIZE) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
+
+ npages++;
+ if ((faddr & PAGE_MASK) !=
+ ((faddr + slen - 1) & PAGE_MASK))
+ npages++;
+
+ pktnwc += slen >> 2;
+ idx++;
+ nfrags++;
+ }
+
+ if (pktnwc != pktnw) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
+
+ if (page) {
+ dma_addr = dma_map_page(&dd->pcidev->dev,
+ page, 0, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+ ret = -ENOMEM;
+ goto free_pbc;
+ }
+
+ dma_mapped = 1;
+ }
+
+ qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
+ page, pbc, dma_addr);
+
+ if (nfrags) {
+ ret = qib_user_sdma_init_payload(dd, pq, pkt,
+ iov + idx_save + 1,
+ nfrags, npages);
+ if (ret < 0)
+ goto free_pbc_dma;
+ }
+
+ counter++;
+ npkts++;
+
+ list_add_tail(&pkt->list, list);
+ }
+
+ ret = idx;
+ goto done;
+
+free_pbc_dma:
+ if (dma_mapped)
+ dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
+free_pbc:
+ if (page) {
+ kunmap(page);
+ __free_page(page);
+ } else
+ dma_pool_free(pq->header_cache, pbc, dma_addr);
+free_pkt:
+ kmem_cache_free(pq->pkt_slab, pkt);
+free_list:
+ qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
+done:
+ return ret;
+}
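+
+/*
+ * Example of the PBC length check above (illustrative only): a
+ * 64-byte header iovec gives nw = 16, so pktnwc starts at 15; one
+ * 256-byte payload iovec then brings pktnwc to 15 + 64 = 79, and the
+ * packet is accepted only if the PBC advertises exactly 79 dwords,
+ * i.e. every word of the packet except the first PBC word itself.
+ */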
+
+static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
+ u32 c)
+{
+ pq->sent_counter = c;
+}
+
+/* try to clean out queue -- needs pq->lock */
+static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq)
+{
+ struct qib_devdata *dd = ppd->dd;
+ struct list_head free_list;
+ struct qib_user_sdma_pkt *pkt;
+ struct qib_user_sdma_pkt *pkt_prev;
+ int ret = 0;
+
+ INIT_LIST_HEAD(&free_list);
+
+ list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
+ s64 descd = ppd->sdma_descq_removed - pkt->added;
+
+ if (descd < 0)
+ break;
+
+ list_move_tail(&pkt->list, &free_list);
+
+ /* one more packet cleaned */
+ ret++;
+ }
+
+ if (!list_empty(&free_list)) {
+ u32 counter;
+
+ pkt = list_entry(free_list.prev,
+ struct qib_user_sdma_pkt, list);
+ counter = pkt->counter;
+
+ qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
+ qib_user_sdma_set_complete_counter(pq, counter);
+ }
+
+ return ret;
+}
+
+void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
+{
+ if (!pq)
+ return;
+
+ kmem_cache_destroy(pq->pkt_slab);
+ dma_pool_destroy(pq->header_cache);
+ kfree(pq);
+}
+
+/* clean descriptor queue, returns > 0 if some elements cleaned */
+static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ ret = qib_sdma_make_progress(ppd);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+ return ret;
+}
+
+/* We're in close; drain packets so that we can clean up successfully... */
+void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int i;
+
+ if (!pq)
+ return;
+
+ for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
+ mutex_lock(&pq->lock);
+ if (list_empty(&pq->sent)) {
+ mutex_unlock(&pq->lock);
+ break;
+ }
+ qib_user_sdma_hwqueue_clean(ppd);
+ qib_user_sdma_queue_clean(ppd, pq);
+ mutex_unlock(&pq->lock);
+ msleep(10);
+ }
+
+ if (!list_empty(&pq->sent)) {
+ struct list_head free_list;
+
+ qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
+ INIT_LIST_HEAD(&free_list);
+ mutex_lock(&pq->lock);
+ list_splice_init(&pq->sent, &free_list);
+ qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
+ mutex_unlock(&pq->lock);
+ }
+}
+
+static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd,
+ u64 addr, u64 dwlen, u64 dwoffset)
+{
+ u8 tmpgen;
+
+ tmpgen = ppd->sdma_generation;
+
+ return cpu_to_le64(/* SDmaPhyAddr[31:0] */
+ ((addr & 0xfffffffcULL) << 32) |
+ /* SDmaGeneration[1:0] */
+ ((tmpgen & 3ULL) << 30) |
+ /* SDmaDwordCount[10:0] */
+ ((dwlen & 0x7ffULL) << 16) |
+ /* SDmaBufOffset[12:2] */
+ (dwoffset & 0x7ffULL));
+}
+
+static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
+{
+ return descq | cpu_to_le64(1ULL << 12);
+}
+
+static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
+{
+ /* last */ /* dma head */
+ return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
+}
+
+static inline __le64 qib_sdma_make_desc1(u64 addr)
+{
+ /* SDmaPhyAddr[47:32] */
+ return cpu_to_le64(addr >> 32);
+}
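+
+/*
+ * Worked example of the descriptor encoding above (illustrative
+ * only): for addr = 0x12345678, generation 1, dwlen = 16 and
+ * dwoffset = 0, qib_sdma_make_desc0() packs 0x12345678 into bits
+ * 63:32 and (1 << 30) | (16 << 16) = 0x40100000 into bits 31:0;
+ * the first descriptor of a packet additionally sets bit 12, the
+ * last sets bits 11 and 13, and desc1 carries addr[47:32] (zero
+ * here).
+ */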
+
+static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
+ struct qib_user_sdma_pkt *pkt, int idx,
+ unsigned ofs, u16 tail)
+{
+ const u64 addr = (u64) pkt->addr[idx].addr +
+ (u64) pkt->addr[idx].offset;
+ const u64 dwlen = (u64) pkt->addr[idx].length / 4;
+ __le64 *descqp;
+ __le64 descq0;
+
+ descqp = &ppd->sdma_descq[tail].qw[0];
+
+ descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs);
+ if (idx == 0)
+ descq0 = qib_sdma_make_first_desc0(descq0);
+ if (idx == pkt->naddr - 1)
+ descq0 = qib_sdma_make_last_desc0(descq0);
+
+ descqp[0] = descq0;
+ descqp[1] = qib_sdma_make_desc1(addr);
+}
+
+/* pq->lock must be held, get packets on the wire... */
+static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq,
+ struct list_head *pktlist)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret = 0;
+ unsigned long flags;
+ u16 tail;
+ u8 generation;
+ u64 descq_added;
+
+ if (list_empty(pktlist))
+ return 0;
+
+ if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
+ return -ECOMM;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ /* keep a copy for restoring purposes in case of problems */
+ generation = ppd->sdma_generation;
+ descq_added = ppd->sdma_descq_added;
+
+ if (unlikely(!__qib_sdma_running(ppd))) {
+ ret = -ECOMM;
+ goto unlock;
+ }
+
+ tail = ppd->sdma_descq_tail;
+ while (!list_empty(pktlist)) {
+ struct qib_user_sdma_pkt *pkt =
+ list_entry(pktlist->next, struct qib_user_sdma_pkt,
+ list);
+ int i;
+ unsigned ofs = 0;
+ u16 dtail = tail;
+
+ if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
+ goto unlock_check_tail;
+
+ for (i = 0; i < pkt->naddr; i++) {
+ qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
+ ofs += pkt->addr[i].length >> 2;
+
+ if (++tail == ppd->sdma_descq_cnt) {
+ tail = 0;
+ ++ppd->sdma_generation;
+ }
+ }
+
+ if ((ofs << 2) > ppd->ibmaxlen) {
+ ret = -EMSGSIZE;
+ goto unlock;
+ }
+
+ /*
+ * If the packet is >= 2KB mtu equivalent, we have to use
+ * the large buffers, and have to mark each descriptor as
+ * part of a large buffer packet.
+ */
+ if (ofs > dd->piosize2kmax_dwords) {
+ for (i = 0; i < pkt->naddr; i++) {
+ ppd->sdma_descq[dtail].qw[0] |=
+ cpu_to_le64(1ULL << 14);
+ if (++dtail == ppd->sdma_descq_cnt)
+ dtail = 0;
+ }
+ }
+
+ ppd->sdma_descq_added += pkt->naddr;
+ pkt->added = ppd->sdma_descq_added;
+ list_move_tail(&pkt->list, &pq->sent);
+ ret++;
+ }
+
+unlock_check_tail:
+ /* advance the tail on the chip if necessary */
+ if (ppd->sdma_descq_tail != tail)
+ dd->f_sdma_update_tail(ppd, tail);
+
+unlock:
+ if (unlikely(ret < 0)) {
+ ppd->sdma_generation = generation;
+ ppd->sdma_descq_added = descq_added;
+ }
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+ return ret;
+}
+
+int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
+ struct qib_user_sdma_queue *pq,
+ const struct iovec *iov,
+ unsigned long dim)
+{
+ struct qib_devdata *dd = rcd->dd;
+ struct qib_pportdata *ppd = rcd->ppd;
+ int ret = 0;
+ struct list_head list;
+ int npkts = 0;
+
+ INIT_LIST_HEAD(&list);
+
+ mutex_lock(&pq->lock);
+
+ /* why not -ECOMM like qib_user_sdma_push_pkts() below? */
+ if (!qib_sdma_running(ppd))
+ goto done_unlock;
+
+ if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
+ qib_user_sdma_hwqueue_clean(ppd);
+ qib_user_sdma_queue_clean(ppd, pq);
+ }
+
+ while (dim) {
+ const int mxp = 8;
+
+ down_write(&current->mm->mmap_sem);
+ ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
+ up_write(&current->mm->mmap_sem);
+
+ if (ret <= 0)
+ goto done_unlock;
+ else {
+ dim -= ret;
+ iov += ret;
+ }
+
+ /* force packets onto the sdma hw queue... */
+ if (!list_empty(&list)) {
+ /*
+ * Lazily clean hw queue. the 4 is a guess of about
+ * how many sdma descriptors a packet will take (it
+ * doesn't have to be perfect).
+ */
+ if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
+ qib_user_sdma_hwqueue_clean(ppd);
+ qib_user_sdma_queue_clean(ppd, pq);
+ }
+
+ ret = qib_user_sdma_push_pkts(ppd, pq, &list);
+ if (ret < 0)
+ goto done_unlock;
+ else {
+ npkts += ret;
+ pq->counter += ret;
+
+ if (!list_empty(&list))
+ goto done_unlock;
+ }
+ }
+ }
+
+done_unlock:
+ if (!list_empty(&list))
+ qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
+ mutex_unlock(&pq->lock);
+
+ return (ret < 0) ? ret : npkts;
+}
+
+int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq)
+{
+ int ret = 0;
+
+ mutex_lock(&pq->lock);
+ qib_user_sdma_hwqueue_clean(ppd);
+ ret = qib_user_sdma_queue_clean(ppd, pq);
+ mutex_unlock(&pq->lock);
+
+ return ret;
+}
+
+u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
+{
+ return pq ? pq->sent_counter : 0;
+}
+
+u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
+{
+ return pq ? pq->counter : 0;
+}
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.h b/drivers/infiniband/hw/qib/qib_user_sdma.h
new file mode 100644
index 000000000000..ce8cbaf6a5c2
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/device.h>
+
+struct qib_user_sdma_queue;
+
+struct qib_user_sdma_queue *
+qib_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
+void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq);
+
+int qib_user_sdma_writev(struct qib_ctxtdata *pd,
+ struct qib_user_sdma_queue *pq,
+ const struct iovec *iov,
+ unsigned long dim);
+
+int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq);
+
+void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq);
+
+u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq);
+u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq);
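+
+/*
+ * Minimal usage sketch of the interface above (illustrative only;
+ * identifiers such as rcd, dd and subctxt stand in for whatever the
+ * real caller has at hand, and error handling is omitted):
+ *
+ *	struct qib_user_sdma_queue *pq =
+ *		qib_user_sdma_queue_create(&dd->pcidev->dev, dd->unit,
+ *					   rcd->ctxt, subctxt);
+ *	ret = qib_user_sdma_writev(rcd, pq, iov, dim);
+ *	qib_user_sdma_make_progress(rcd->ppd, pq);
+ *	qib_user_sdma_queue_drain(rcd->ppd, pq);
+ *	qib_user_sdma_queue_destroy(pq);
+ */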
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
new file mode 100644
index 000000000000..cda8f4173d23
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -0,0 +1,2248 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_mad.h>
+#include <rdma/ib_user_verbs.h>
+#include <linux/io.h>
+#include <linux/utsname.h>
+#include <linux/rculist.h>
+#include <linux/mm.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+static unsigned int ib_qib_qp_table_size = 251;
+module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
+MODULE_PARM_DESC(qp_table_size, "QP table size");
+
+unsigned int ib_qib_lkey_table_size = 16;
+module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint,
+ S_IRUGO);
+MODULE_PARM_DESC(lkey_table_size,
+ "LKEY table size in bits (2^n, 1 <= n <= 23)");
+
+static unsigned int ib_qib_max_pds = 0xFFFF;
+module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
+MODULE_PARM_DESC(max_pds,
+ "Maximum number of protection domains to support");
+
+static unsigned int ib_qib_max_ahs = 0xFFFF;
+module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
+
+unsigned int ib_qib_max_cqes = 0x2FFFF;
+module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
+MODULE_PARM_DESC(max_cqes,
+ "Maximum number of completion queue entries to support");
+
+unsigned int ib_qib_max_cqs = 0x1FFFF;
+module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
+
+unsigned int ib_qib_max_qp_wrs = 0x3FFF;
+module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
+
+unsigned int ib_qib_max_qps = 16384;
+module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
+MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
+
+unsigned int ib_qib_max_sges = 0x60;
+module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
+MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
+
+unsigned int ib_qib_max_mcast_grps = 16384;
+module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
+MODULE_PARM_DESC(max_mcast_grps,
+ "Maximum number of multicast groups to support");
+
+unsigned int ib_qib_max_mcast_qp_attached = 16;
+module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
+ uint, S_IRUGO);
+MODULE_PARM_DESC(max_mcast_qp_attached,
+ "Maximum number of attached QPs to support");
+
+unsigned int ib_qib_max_srqs = 1024;
+module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
+
+unsigned int ib_qib_max_srq_sges = 128;
+module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
+
+unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
+module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support");
+
+static unsigned int ib_qib_disable_sma;
+module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(disable_sma, "Disable the SMA");
+
+/*
+ * Note that it is OK to post send work requests in the SQE and ERR
+ * states; qib_do_send() will process them and generate error
+ * completions as per IB 1.2 C10-96.
+ */
+const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = 0,
+ [IB_QPS_INIT] = QIB_POST_RECV_OK,
+ [IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
+ [IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
+ QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
+ QIB_PROCESS_NEXT_SEND_OK,
+ [IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
+ QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
+ [IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
+ QIB_POST_SEND_OK | QIB_FLUSH_SEND,
+ [IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
+ QIB_POST_SEND_OK | QIB_FLUSH_SEND,
+};
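+
+/*
+ * For example, reading the table above: IB_QPS_SQE sets
+ * QIB_POST_SEND_OK but not QIB_PROCESS_SEND_OK, so a send posted in
+ * that state is accepted and later completed in error by the
+ * QIB_FLUSH_SEND path rather than being put on the wire.
+ */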
+
+struct qib_ucontext {
+ struct ib_ucontext ibucontext;
+};
+
+static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
+ *ibucontext)
+{
+ return container_of(ibucontext, struct qib_ucontext, ibucontext);
+}
+
+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_qib_wc_opcode[] = {
+ [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+ [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+ [IB_WR_SEND] = IB_WC_SEND,
+ [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+ [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+ [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
+};
+
+/*
+ * System image GUID.
+ */
+__be64 ib_qib_sys_image_guid;
+
+/**
+ * qib_copy_sge - copy data to SGE memory
+ * @ss: the SGE state
+ * @data: the data to copy
+ * @length: the length of the data
+ */
+void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
+{
+ struct qib_sge *sge = &ss->sge;
+
+ while (length) {
+ u32 len = sge->length;
+
+ if (len > length)
+ len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ memcpy(sge->vaddr, data, len);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (release)
+ atomic_dec(&sge->mr->refcount);
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ data += len;
+ length -= len;
+ }
+}
+
+/**
+ * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
+ * @ss: the SGE state
+ * @length: the number of bytes to skip
+ */
+void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
+{
+ struct qib_sge *sge = &ss->sge;
+
+ while (length) {
+ u32 len = sge->length;
+
+ if (len > length)
+ len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (release)
+ atomic_dec(&sge->mr->refcount);
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ length -= len;
+ }
+}
+
+/*
+ * Count the number of DMA descriptors needed to send length bytes of data.
+ * Don't modify the qib_sge_state to get the count.
+ * Return zero if any of the segments is not aligned.
+ */
+static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
+{
+ struct qib_sge *sg_list = ss->sg_list;
+ struct qib_sge sge = ss->sge;
+ u8 num_sge = ss->num_sge;
+ u32 ndesc = 1; /* count the header */
+
+ while (length) {
+ u32 len = sge.length;
+
+ if (len > length)
+ len = length;
+ if (len > sge.sge_length)
+ len = sge.sge_length;
+ BUG_ON(len == 0);
+ if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
+ (len != length && (len & (sizeof(u32) - 1)))) {
+ ndesc = 0;
+ break;
+ }
+ ndesc++;
+ sge.vaddr += len;
+ sge.length -= len;
+ sge.sge_length -= len;
+ if (sge.sge_length == 0) {
+ if (--num_sge)
+ sge = *sg_list++;
+ } else if (sge.length == 0 && sge.mr->lkey) {
+ if (++sge.n >= QIB_SEGSZ) {
+ if (++sge.m >= sge.mr->mapsz)
+ break;
+ sge.n = 0;
+ }
+ sge.vaddr =
+ sge.mr->map[sge.m]->segs[sge.n].vaddr;
+ sge.length =
+ sge.mr->map[sge.m]->segs[sge.n].length;
+ }
+ length -= len;
+ }
+ return ndesc;
+}
+
+/*
+ * Copy from the SGEs to the data buffer.
+ */
+static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
+{
+ struct qib_sge *sge = &ss->sge;
+
+ while (length) {
+ u32 len = sge->length;
+
+ if (len > length)
+ len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ memcpy(data, sge->vaddr, len);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ data += len;
+ length -= len;
+ }
+}
+
+/**
+ * qib_post_one_send - post one RC, UC, or UD send work request
+ * @qp: the QP to post on
+ * @wr: the work request to send
+ */
+static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr)
+{
+ struct qib_swqe *wqe;
+ u32 next;
+ int i;
+ int j;
+ int acc;
+ int ret;
+ unsigned long flags;
+ struct qib_lkey_table *rkt;
+ struct qib_pd *pd;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Check that state is OK to post send. */
+ if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
+ goto bail_inval;
+
+ /* IB spec says that num_sge == 0 is OK. */
+ if (wr->num_sge > qp->s_max_sge)
+ goto bail_inval;
+
+ /*
+ * Don't allow RDMA reads or atomic operations on UC QPs,
+ * and reject undefined opcodes.
+ * Make sure buffer is large enough to hold the result for atomics.
+ */
+ if (wr->opcode == IB_WR_FAST_REG_MR) {
+ if (qib_fast_reg_mr(qp, wr))
+ goto bail_inval;
+ } else if (qp->ibqp.qp_type == IB_QPT_UC) {
+ if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
+ goto bail_inval;
+ } else if (qp->ibqp.qp_type != IB_QPT_RC) {
+ /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
+ if (wr->opcode != IB_WR_SEND &&
+ wr->opcode != IB_WR_SEND_WITH_IMM)
+ goto bail_inval;
+ /* Check UD destination address PD */
+ if (qp->ibqp.pd != wr->wr.ud.ah->pd)
+ goto bail_inval;
+ } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
+ goto bail_inval;
+ else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
+ (wr->num_sge == 0 ||
+ wr->sg_list[0].length < sizeof(u64) ||
+ wr->sg_list[0].addr & (sizeof(u64) - 1)))
+ goto bail_inval;
+ else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
+ goto bail_inval;
+
+ next = qp->s_head + 1;
+ if (next >= qp->s_size)
+ next = 0;
+ if (next == qp->s_last) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ rkt = &to_idev(qp->ibqp.device)->lk_table;
+ pd = to_ipd(qp->ibqp.pd);
+ wqe = get_swqe_ptr(qp, qp->s_head);
+ wqe->wr = *wr;
+ wqe->length = 0;
+ j = 0;
+ if (wr->num_sge) {
+ acc = wr->opcode >= IB_WR_RDMA_READ ?
+ IB_ACCESS_LOCAL_WRITE : 0;
+ for (i = 0; i < wr->num_sge; i++) {
+ u32 length = wr->sg_list[i].length;
+ int ok;
+
+ if (length == 0)
+ continue;
+ ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
+ &wr->sg_list[i], acc);
+ if (!ok)
+ goto bail_inval_free;
+ wqe->length += length;
+ j++;
+ }
+ wqe->wr.num_sge = j;
+ }
+ if (qp->ibqp.qp_type == IB_QPT_UC ||
+ qp->ibqp.qp_type == IB_QPT_RC) {
+ if (wqe->length > 0x80000000U)
+ goto bail_inval_free;
+ } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
+ qp->port_num - 1)->ibmtu)
+ goto bail_inval_free;
+ else
+ atomic_inc(&to_iah(wr->wr.ud.ah)->refcount);
+ wqe->ssn = qp->s_ssn++;
+ qp->s_head = next;
+
+ ret = 0;
+ goto bail;
+
+bail_inval_free:
+ while (j) {
+ struct qib_sge *sge = &wqe->sg_list[--j];
+
+ atomic_dec(&sge->mr->refcount);
+ }
+bail_inval:
+ ret = -EINVAL;
+bail:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
+
+/**
+ * qib_post_send - post a send on a QP
+ * @ibqp: the QP to post the send on
+ * @wr: the list of work requests to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ */
+static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+ int err = 0;
+
+ for (; wr; wr = wr->next) {
+ err = qib_post_one_send(qp, wr);
+ if (err) {
+ *bad_wr = wr;
+ goto bail;
+ }
+ }
+
+ /* Try to do the send work in the caller's context. */
+ qib_do_send(&qp->s_work);
+
+bail:
+ return err;
+}
+
+/**
+ * qib_post_receive - post a receive on a QP
+ * @ibqp: the QP to post the receive on
+ * @wr: the WR to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ */
+static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+ struct qib_rwq *wq = qp->r_rq.wq;
+ unsigned long flags;
+ int ret;
+
+ /* Check that state is OK to post receive. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ for (; wr; wr = wr->next) {
+ struct qib_rwqe *wqe;
+ u32 next;
+ int i;
+
+ if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&qp->r_rq.lock, flags);
+ next = wq->head + 1;
+ if (next >= qp->r_rq.size)
+ next = 0;
+ if (next == wq->tail) {
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ *bad_wr = wr;
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
+ wqe->wr_id = wr->wr_id;
+ wqe->num_sge = wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++)
+ wqe->sg_list[i] = wr->sg_list[i];
+ /* Make sure queue entry is written before the head index. */
+ smp_wmb();
+ wq->head = next;
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_qp_rcv - process an incoming packet on a QP
+ * @rcd: the context pointer
+ * @hdr: the packet header
+ * @has_grh: true if the packet has a GRH
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP the packet came on
+ *
+ * This is called from qib_ib_rcv() to process an incoming packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+{
+ struct qib_ibport *ibp = &rcd->ppd->ibport_data;
+
+ /* Check for valid receive state. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
+ ibp->n_pkt_drops++;
+ return;
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ if (ib_qib_disable_sma)
+ break;
+ /* FALLTHROUGH */
+ case IB_QPT_UD:
+ qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
+ break;
+
+ case IB_QPT_RC:
+ qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
+ break;
+
+ case IB_QPT_UC:
+ qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * qib_ib_rcv - process an incoming packet
+ * @rcd: the context pointer
+ * @rhdr: the header of the packet
+ * @data: the packet payload
+ * @tlen: the packet length
+ *
+ * This is called from qib_kreceive() to process an incoming packet at
+ * interrupt level. Tlen is the length of the header + data + CRC in bytes.
+ */
+void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
+{
+ struct qib_pportdata *ppd = rcd->ppd;
+ struct qib_ibport *ibp = &ppd->ibport_data;
+ struct qib_ib_header *hdr = rhdr;
+ struct qib_other_headers *ohdr;
+ struct qib_qp *qp;
+ u32 qp_num;
+ int lnh;
+ u8 opcode;
+ u16 lid;
+
+ /* 24 == LRH+BTH+CRC */
+ if (unlikely(tlen < 24))
+ goto drop;
+
+ /* Check for a valid destination LID (see ch. 7.11.1). */
+ lid = be16_to_cpu(hdr->lrh[1]);
+ if (lid < QIB_MULTICAST_LID_BASE) {
+ lid &= ~((1 << ppd->lmc) - 1);
+ if (unlikely(lid != ppd->lid))
+ goto drop;
+ }
+
+ /* Check for GRH */
+ lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+ if (lnh == QIB_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else if (lnh == QIB_LRH_GRH) {
+ u32 vtf;
+
+ ohdr = &hdr->u.l.oth;
+ if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+ goto drop;
+ vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
+ if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+ goto drop;
+ } else
+ goto drop;
+
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+ ibp->opstats[opcode & 0x7f].n_bytes += tlen;
+ ibp->opstats[opcode & 0x7f].n_packets++;
+
+ /* Get the destination QP number. */
+ qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+ if (qp_num == QIB_MULTICAST_QPN) {
+ struct qib_mcast *mcast;
+ struct qib_mcast_qp *p;
+
+ if (lnh != QIB_LRH_GRH)
+ goto drop;
+ mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
+ if (mcast == NULL)
+ goto drop;
+ ibp->n_multicast_rcv++;
+ list_for_each_entry_rcu(p, &mcast->qp_list, list)
+ qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
+ /*
+ * Notify qib_multicast_detach() if it is waiting for us
+ * to finish.
+ */
+ if (atomic_dec_return(&mcast->refcount) <= 1)
+ wake_up(&mcast->wait);
+ } else {
+ qp = qib_lookup_qpn(ibp, qp_num);
+ if (!qp)
+ goto drop;
+ ibp->n_unicast_rcv++;
+ qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
+ /*
+ * Notify qib_destroy_qp() if it is waiting
+ * for us to finish.
+ */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+ return;
+
+drop:
+ ibp->n_pkt_drops++;
+}
+
+/*
+ * This is called from a timer to check for QPs
+ * which need kernel memory in order to send a packet.
+ */
+static void mem_timer(unsigned long data)
+{
+ struct qib_ibdev *dev = (struct qib_ibdev *) data;
+ struct list_head *list = &dev->memwait;
+ struct qib_qp *qp = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->pending_lock, flags);
+ if (!list_empty(list)) {
+ qp = list_entry(list->next, struct qib_qp, iowait);
+ list_del_init(&qp->iowait);
+ atomic_inc(&qp->refcount);
+ if (!list_empty(list))
+ mod_timer(&dev->mem_timer, jiffies + 1);
+ }
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+
+ if (qp) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & QIB_S_WAIT_KMEM) {
+ qp->s_flags &= ~QIB_S_WAIT_KMEM;
+ qib_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+}
+
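+/*
+ * Advance the SGE state by 'length' bytes: move to the next SGE when
+ * the current one is exhausted, or to the next segment of the current
+ * memory region when the segment runs out.
+ */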
+static void update_sge(struct qib_sge_state *ss, u32 length)
+{
+ struct qib_sge *sge = &ss->sge;
+
+ sge->vaddr += length;
+ sge->length -= length;
+ sge->sge_length -= length;
+ if (sge->sge_length == 0) {
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ return;
+ sge->n = 0;
+ }
+ sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+}
+
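+/*
+ * Helpers used by copy_io() to assemble unaligned source bytes into
+ * aligned 32-bit PIO writes. "Upper" refers to the bytes that come
+ * later in memory, so the shift direction depends on host endianness.
+ */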
+#ifdef __LITTLE_ENDIAN
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+ return data >> shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+ return data << shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+ data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
+ data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+ return data;
+}
+#else
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+ return data << shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+ return data >> shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+ data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
+ data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+ return data;
+}
+#endif
+
+static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
+ u32 length, unsigned flush_wc)
+{
+ u32 extra = 0;
+ u32 data = 0;
+ u32 last;
+
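+ /*
+ * Accumulate partial dwords in 'data' ('extra' bytes are valid) so
+ * that only aligned 32-bit writes reach the write-combining PIO
+ * buffer. The final dword is kept in 'last' and written after the
+ * loop so it can serve as the trigger word.
+ */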
+ while (1) {
+ u32 len = ss->sge.length;
+ u32 off;
+
+ if (len > length)
+ len = length;
+ if (len > ss->sge.sge_length)
+ len = ss->sge.sge_length;
+ BUG_ON(len == 0);
+ /* If the source address is not aligned, try to align it. */
+ off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
+ if (off) {
+ u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
+ ~(sizeof(u32) - 1));
+ u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
+ u32 y;
+
+ y = sizeof(u32) - off;
+ if (len > y)
+ len = y;
+ if (len + extra >= sizeof(u32)) {
+ data |= set_upper_bits(v, extra *
+ BITS_PER_BYTE);
+ len = sizeof(u32) - extra;
+ if (len == length) {
+ last = data;
+ break;
+ }
+ __raw_writel(data, piobuf);
+ piobuf++;
+ extra = 0;
+ data = 0;
+ } else {
+ /* Clear unused upper bytes */
+ data |= clear_upper_bytes(v, len, extra);
+ if (len == length) {
+ last = data;
+ break;
+ }
+ extra += len;
+ }
+ } else if (extra) {
+ /* Source address is aligned. */
+ u32 *addr = (u32 *) ss->sge.vaddr;
+ int shift = extra * BITS_PER_BYTE;
+ int ushift = 32 - shift;
+ u32 l = len;
+
+ while (l >= sizeof(u32)) {
+ u32 v = *addr;
+
+ data |= set_upper_bits(v, shift);
+ __raw_writel(data, piobuf);
+ data = get_upper_bits(v, ushift);
+ piobuf++;
+ addr++;
+ l -= sizeof(u32);
+ }
+ /*
+ * We still have 'l' bytes (less than a dword) left over, plus
+ * 'extra' bytes already buffered in 'data'.
+ */
+ if (l) {
+ u32 v = *addr;
+
+ if (l + extra >= sizeof(u32)) {
+ data |= set_upper_bits(v, shift);
+ len -= l + extra - sizeof(u32);
+ if (len == length) {
+ last = data;
+ break;
+ }
+ __raw_writel(data, piobuf);
+ piobuf++;
+ extra = 0;
+ data = 0;
+ } else {
+ /* Clear unused upper bytes */
+ data |= clear_upper_bytes(v, l, extra);
+ if (len == length) {
+ last = data;
+ break;
+ }
+ extra += l;
+ }
+ } else if (len == length) {
+ last = data;
+ break;
+ }
+ } else if (len == length) {
+ u32 w;
+
+ /*
+ * Need to round up for the last dword in the
+ * packet.
+ */
+ w = (len + 3) >> 2;
+ qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
+ piobuf += w - 1;
+ last = ((u32 *) ss->sge.vaddr)[w - 1];
+ break;
+ } else {
+ u32 w = len >> 2;
+
+ qib_pio_copy(piobuf, ss->sge.vaddr, w);
+ piobuf += w;
+
+ extra = len & (sizeof(u32) - 1);
+ if (extra) {
+ u32 v = ((u32 *) ss->sge.vaddr)[w];
+
+ /* Clear unused upper bytes */
+ data = clear_upper_bytes(v, extra, 0);
+ }
+ }
+ update_sge(ss, len);
+ length -= len;
+ }
+ /* Update address before sending packet. */
+ update_sge(ss, length);
+ if (flush_wc) {
+ /* must flush early everything before trigger word */
+ qib_flush_wc();
+ __raw_writel(last, piobuf);
+ /* be sure trigger word is written */
+ qib_flush_wc();
+ } else
+ __raw_writel(last, piobuf);
+}
+
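+/*
+ * Get a free verbs tx request structure from dev->txreq_free. If none
+ * is available, queue the QP on dev->txwait with QIB_S_WAIT_TX set so
+ * qib_put_txreq() can restart it later, and return NULL with *retp set
+ * to -EBUSY.
+ */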
+static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
+ struct qib_qp *qp, int *retp)
+{
+ struct qib_verbs_txreq *tx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ spin_lock(&dev->pending_lock);
+
+ if (!list_empty(&dev->txreq_free)) {
+ struct list_head *l = dev->txreq_free.next;
+
+ list_del(l);
+ tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+ *retp = 0;
+ } else {
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
+ list_empty(&qp->iowait)) {
+ dev->n_txwait++;
+ qp->s_flags |= QIB_S_WAIT_TX;
+ list_add_tail(&qp->iowait, &dev->txwait);
+ }
+ tx = NULL;
+ qp->s_flags &= ~QIB_S_BUSY;
+ *retp = -EBUSY;
+ }
+
+ spin_unlock(&dev->pending_lock);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ return tx;
+}
+
+void qib_put_txreq(struct qib_verbs_txreq *tx)
+{
+ struct qib_ibdev *dev;
+ struct qib_qp *qp;
+ unsigned long flags;
+
+ qp = tx->qp;
+ dev = to_idev(qp->ibqp.device);
+
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ if (tx->mr) {
+ atomic_dec(&tx->mr->refcount);
+ tx->mr = NULL;
+ }
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
+ tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
+ dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
+ tx->txreq.addr, tx->hdr_dwords << 2,
+ DMA_TO_DEVICE);
+ kfree(tx->align_buf);
+ }
+
+ spin_lock_irqsave(&dev->pending_lock, flags);
+
+ /* Put struct back on free list */
+ list_add(&tx->txreq.list, &dev->txreq_free);
+
+ if (!list_empty(&dev->txwait)) {
+ /* Wake up first QP wanting a free struct */
+ qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
+ list_del_init(&qp->iowait);
+ atomic_inc(&qp->refcount);
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & QIB_S_WAIT_TX) {
+ qp->s_flags &= ~QIB_S_WAIT_TX;
+ qib_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ } else
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+}
+
+/*
+ * This is called when there are send DMA descriptors that might be
+ * available.
+ *
+ * This is called with ppd->sdma_lock held.
+ */
+void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
+{
+ struct qib_qp *qp, *nqp;
+ struct qib_qp *qps[20];
+ struct qib_ibdev *dev;
+ unsigned i, n;
+
+ n = 0;
+ dev = &ppd->dd->verbs_dev;
+ spin_lock(&dev->pending_lock);
+
+ /* Search wait list for first QP wanting DMA descriptors. */
+ list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
+ if (qp->port_num != ppd->port)
+ continue;
+ if (n == ARRAY_SIZE(qps))
+ break;
+ if (qp->s_tx->txreq.sg_count > avail)
+ break;
+ avail -= qp->s_tx->txreq.sg_count;
+ list_del_init(&qp->iowait);
+ atomic_inc(&qp->refcount);
+ qps[n++] = qp;
+ }
+
+ spin_unlock(&dev->pending_lock);
+
+ for (i = 0; i < n; i++) {
+ qp = qps[i];
+ spin_lock(&qp->s_lock);
+ if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
+ qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
+ qib_schedule_send(qp);
+ }
+ spin_unlock(&qp->s_lock);
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+}
+
+/*
+ * This is called with ppd->sdma_lock held.
+ */
+static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
+{
+ struct qib_verbs_txreq *tx =
+ container_of(cookie, struct qib_verbs_txreq, txreq);
+ struct qib_qp *qp = tx->qp;
+
+ spin_lock(&qp->s_lock);
+ if (tx->wqe)
+ qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
+ else if (qp->ibqp.qp_type == IB_QPT_RC) {
+ struct qib_ib_header *hdr;
+
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
+ hdr = &tx->align_buf->hdr;
+ else {
+ struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+
+ hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
+ }
+ qib_rc_send_complete(qp, hdr);
+ }
+ if (atomic_dec_and_test(&qp->s_dma_busy)) {
+ if (qp->state == IB_QPS_RESET)
+ wake_up(&qp->wait_dma);
+ else if (qp->s_flags & QIB_S_WAIT_DMA) {
+ qp->s_flags &= ~QIB_S_WAIT_DMA;
+ qib_schedule_send(qp);
+ }
+ }
+ spin_unlock(&qp->s_lock);
+
+ qib_put_txreq(tx);
+}
+
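+/*
+ * Queue the QP on dev->memwait (arming mem_timer if the list was empty)
+ * and return -EBUSY so the send is retried when kernel memory becomes
+ * available. If the QP is no longer in a state that allows progress,
+ * return 0.
+ */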
+static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+ spin_lock(&dev->pending_lock);
+ if (list_empty(&qp->iowait)) {
+ if (list_empty(&dev->memwait))
+ mod_timer(&dev->mem_timer, jiffies + 1);
+ qp->s_flags |= QIB_S_WAIT_KMEM;
+ list_add_tail(&qp->iowait, &dev->memwait);
+ }
+ spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~QIB_S_BUSY;
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ return ret;
+}
+
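+/*
+ * Post a send via the SDMA engine. The PBC and header are normally
+ * taken from dev->pio_hdrs and the payload is gathered from the SGE
+ * list; if the payload would need more descriptors than the queue
+ * holds, header and payload are instead copied into a single
+ * DMA-mapped bounce buffer (tx->align_buf).
+ */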
+static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
+ u32 hdrwords, struct qib_sge_state *ss, u32 len,
+ u32 plen, u32 dwords)
+{
+ struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_verbs_txreq *tx;
+ struct qib_pio_header *phdr;
+ u32 control;
+ u32 ndesc;
+ int ret;
+
+ tx = qp->s_tx;
+ if (tx) {
+ qp->s_tx = NULL;
+ /* resend previously constructed packet */
+ ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
+ goto bail;
+ }
+
+ tx = get_txreq(dev, qp, &ret);
+ if (!tx)
+ goto bail;
+
+ control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
+ be16_to_cpu(hdr->lrh[0]) >> 12);
+ tx->qp = qp;
+ atomic_inc(&qp->refcount);
+ tx->wqe = qp->s_wqe;
+ tx->mr = qp->s_rdma_mr;
+ if (qp->s_rdma_mr)
+ qp->s_rdma_mr = NULL;
+ tx->txreq.callback = sdma_complete;
+ if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
+ tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
+ else
+ tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
+ if (plen + 1 > dd->piosize2kmax_dwords)
+ tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
+
+ if (len) {
+ /*
+ * Don't try to DMA if it takes more descriptors than
+ * the queue holds.
+ */
+ ndesc = qib_count_sge(ss, len);
+ if (ndesc >= ppd->sdma_descq_cnt)
+ ndesc = 0;
+ } else
+ ndesc = 1;
+ if (ndesc) {
+ phdr = &dev->pio_hdrs[tx->hdr_inx];
+ phdr->pbc[0] = cpu_to_le32(plen);
+ phdr->pbc[1] = cpu_to_le32(control);
+ memcpy(&phdr->hdr, hdr, hdrwords << 2);
+ tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
+ tx->txreq.sg_count = ndesc;
+ tx->txreq.addr = dev->pio_hdrs_phys +
+ tx->hdr_inx * sizeof(struct qib_pio_header);
+ tx->hdr_dwords = hdrwords + 2; /* add PBC length */
+ ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
+ goto bail;
+ }
+
+ /* Allocate a buffer and copy the header and payload to it. */
+ tx->hdr_dwords = plen + 1;
+ phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
+ if (!phdr)
+ goto err_tx;
+ phdr->pbc[0] = cpu_to_le32(plen);
+ phdr->pbc[1] = cpu_to_le32(control);
+ memcpy(&phdr->hdr, hdr, hdrwords << 2);
+ qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
+
+ tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
+ tx->hdr_dwords << 2, DMA_TO_DEVICE);
+ if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
+ goto map_err;
+ tx->align_buf = phdr;
+ tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
+ tx->txreq.sg_count = 1;
+ ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
+ goto unaligned;
+
+map_err:
+ kfree(phdr);
+err_tx:
+ qib_put_txreq(tx);
+ ret = wait_kmem(dev, qp);
+unaligned:
+ ibp->n_unaligned++;
+bail:
+ return ret;
+}
+
+/*
+ * Queue the QP on dev->piowait and enable the PIO-available interrupt,
+ * returning -EBUSY. If we are now in the error state, return zero
+ * instead so the send work request is flushed.
+ */
+static int no_bufs_available(struct qib_qp *qp)
+{
+ struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+ struct qib_devdata *dd;
+ unsigned long flags;
+ int ret = 0;
+
+ /*
+ * Note that as soon as dd->f_wantpiobuf_intr() is called and
+ * possibly before it returns, qib_ib_piobufavail()
+ * could be called. Therefore, put the QP on the I/O wait list
+ * before enabling the PIO avail interrupt.
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+ spin_lock(&dev->pending_lock);
+ if (list_empty(&qp->iowait)) {
+ dev->n_piowait++;
+ qp->s_flags |= QIB_S_WAIT_PIO;
+ list_add_tail(&qp->iowait, &dev->piowait);
+ dd = dd_from_dev(dev);
+ dd->f_wantpiobuf_intr(dd, 1);
+ }
+ spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~QIB_S_BUSY;
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
+
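+/*
+ * Send a packet by programmed I/O: write the PBC, copy the header and
+ * then the payload into the chip send buffer, observing the
+ * write-combining flush rules so the trigger word is written last.
+ */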
+static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
+ u32 hdrwords, struct qib_sge_state *ss, u32 len,
+ u32 plen, u32 dwords)
+{
+ struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
+ u32 *hdr = (u32 *) ibhdr;
+ u32 __iomem *piobuf_orig;
+ u32 __iomem *piobuf;
+ u64 pbc;
+ unsigned long flags;
+ unsigned flush_wc;
+ u32 control;
+ u32 pbufn;
+
+ control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
+ be16_to_cpu(ibhdr->lrh[0]) >> 12);
+ pbc = ((u64) control << 32) | plen;
+ piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
+ if (unlikely(piobuf == NULL))
+ return no_bufs_available(qp);
+
+ /*
+ * Write the pbc.
+ * We have to flush after the PBC for correctness on some cpus
+ * or WC buffer can be written out of order.
+ */
+ writeq(pbc, piobuf);
+ piobuf_orig = piobuf;
+ piobuf += 2;
+
+ flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
+ if (len == 0) {
+ /*
+ * If there is just the header portion, must flush before
+ * writing last word of header for correctness, and after
+ * the last header word (trigger word).
+ */
+ if (flush_wc) {
+ qib_flush_wc();
+ qib_pio_copy(piobuf, hdr, hdrwords - 1);
+ qib_flush_wc();
+ __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
+ qib_flush_wc();
+ } else
+ qib_pio_copy(piobuf, hdr, hdrwords);
+ goto done;
+ }
+
+ if (flush_wc)
+ qib_flush_wc();
+ qib_pio_copy(piobuf, hdr, hdrwords);
+ piobuf += hdrwords;
+
+ /* The common case is aligned and contained in one segment. */
+ if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
+ !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
+ u32 *addr = (u32 *) ss->sge.vaddr;
+
+ /* Update address before sending packet. */
+ update_sge(ss, len);
+ if (flush_wc) {
+ qib_pio_copy(piobuf, addr, dwords - 1);
+ /* must flush early everything before trigger word */
+ qib_flush_wc();
+ __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
+ /* be sure trigger word is written */
+ qib_flush_wc();
+ } else
+ qib_pio_copy(piobuf, addr, dwords);
+ goto done;
+ }
+ copy_io(piobuf, ss, len, flush_wc);
+done:
+ if (dd->flags & QIB_USE_SPCL_TRIG) {
+ u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+ qib_flush_wc();
+ __raw_writel(0xaebecede, piobuf_orig + spcl_off);
+ }
+ qib_sendbuf_done(dd, pbufn);
+ if (qp->s_rdma_mr) {
+ atomic_dec(&qp->s_rdma_mr->refcount);
+ qp->s_rdma_mr = NULL;
+ }
+ if (qp->s_wqe) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ } else if (qp->ibqp.qp_type == IB_QPT_RC) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ qib_rc_send_complete(qp, ibhdr);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ }
+ return 0;
+}
+
+/**
+ * qib_verbs_send - send a packet
+ * @qp: the QP to send on
+ * @hdr: the packet header
+ * @hdrwords: the number of 32-bit words in the header
+ * @ss: the SGE to send
+ * @len: the length of the packet in bytes
+ *
+ * Return zero if packet is sent or queued OK.
+ * Otherwise, return non-zero and clear the QIB_S_BUSY flag in qp->s_flags.
+ */
+int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
+ u32 hdrwords, struct qib_sge_state *ss, u32 len)
+{
+ struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ u32 plen;
+ int ret;
+ u32 dwords = (len + 3) >> 2;
+
+ /*
+ * Calculate the send buffer trigger address.
+ * The +1 counts for the pbc control dword following the pbc length.
+ */
+ plen = hdrwords + dwords + 1;
+
+ /*
+ * VL15 packets (IB_QPT_SMI) will always use PIO, so we
+ * can defer SDMA restart until link goes ACTIVE without
+ * worrying about just how we got there.
+ */
+ if (qp->ibqp.qp_type == IB_QPT_SMI ||
+ !(dd->flags & QIB_HAS_SEND_DMA))
+ ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
+ plen, dwords);
+ else
+ ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
+ plen, dwords);
+
+ return ret;
+}
+
+int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
+ u64 *rwords, u64 *spkts, u64 *rpkts,
+ u64 *xmit_wait)
+{
+ int ret;
+ struct qib_devdata *dd = ppd->dd;
+
+ if (!(dd->flags & QIB_PRESENT)) {
+ /* no hardware, freeze, etc. */
+ ret = -EINVAL;
+ goto bail;
+ }
+ *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
+ *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
+ *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
+ *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
+ *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_get_counters - get various chip counters
+ * @ppd: the physical port data
+ * @cntrs: counters are placed here
+ *
+ * Return the counters needed by recv_pma_get_portcounters().
+ */
+int qib_get_counters(struct qib_pportdata *ppd,
+ struct qib_verbs_counters *cntrs)
+{
+ int ret;
+
+ if (!(ppd->dd->flags & QIB_PRESENT)) {
+ /* no hardware, freeze, etc. */
+ ret = -EINVAL;
+ goto bail;
+ }
+ cntrs->symbol_error_counter =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
+ cntrs->link_error_recovery_counter =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
+ /*
+ * The link downed counter counts when the other side downs the
+ * connection. We add in the number of times we downed the link
+ * due to local link integrity errors to compensate.
+ */
+ cntrs->link_downed_counter =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
+ cntrs->port_rcv_errors =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
+ cntrs->port_rcv_errors +=
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
+ cntrs->port_rcv_errors +=
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
+ cntrs->port_rcv_remphys_errors =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
+ cntrs->port_xmit_discards =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
+ cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
+ QIBPORTCNTR_WORDSEND);
+ cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
+ QIBPORTCNTR_WORDRCV);
+ cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
+ QIBPORTCNTR_PKTSEND);
+ cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
+ QIBPORTCNTR_PKTRCV);
+ cntrs->local_link_integrity_errors =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
+ cntrs->excessive_buffer_overrun_errors =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
+ cntrs->vl15_dropped =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_ib_piobufavail - callback when a PIO buffer is available
+ * @dd: the device pointer
+ *
+ * This is called from qib_intr() at interrupt level when a PIO buffer is
+ * available after qib_verbs_send() returned an error that no buffers were
+ * available. Disable the interrupt if there are no more QPs waiting.
+ */
+void qib_ib_piobufavail(struct qib_devdata *dd)
+{
+ struct qib_ibdev *dev = &dd->verbs_dev;
+ struct list_head *list;
+ struct qib_qp *qps[5];
+ struct qib_qp *qp;
+ unsigned long flags;
+ unsigned i, n;
+
+ list = &dev->piowait;
+ n = 0;
+
+ /*
+ * Note: checking that the piowait list is empty and clearing
+ * the buffer available interrupt needs to be atomic or we
+ * could end up with QPs on the wait list with the interrupt
+ * disabled.
+ */
+ spin_lock_irqsave(&dev->pending_lock, flags);
+ while (!list_empty(list)) {
+ if (n == ARRAY_SIZE(qps))
+ goto full;
+ qp = list_entry(list->next, struct qib_qp, iowait);
+ list_del_init(&qp->iowait);
+ atomic_inc(&qp->refcount);
+ qps[n++] = qp;
+ }
+ dd->f_wantpiobuf_intr(dd, 0);
+full:
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+
+ for (i = 0; i < n; i++) {
+ qp = qps[i];
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & QIB_S_WAIT_PIO) {
+ qp->s_flags &= ~QIB_S_WAIT_PIO;
+ qib_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ /* Notify qib_destroy_qp() if it is waiting. */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+}
+
+static int qib_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_ibdev *dev = to_idev(ibdev);
+
+ memset(props, 0, sizeof(*props));
+
+ props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
+ IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
+ IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
+ IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
+ props->page_size_cap = PAGE_SIZE;
+ props->vendor_id =
+ QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
+ props->vendor_part_id = dd->deviceid;
+ props->hw_ver = dd->minrev;
+ props->sys_image_guid = ib_qib_sys_image_guid;
+ props->max_mr_size = ~0ULL;
+ props->max_qp = ib_qib_max_qps;
+ props->max_qp_wr = ib_qib_max_qp_wrs;
+ props->max_sge = ib_qib_max_sges;
+ props->max_cq = ib_qib_max_cqs;
+ props->max_ah = ib_qib_max_ahs;
+ props->max_cqe = ib_qib_max_cqes;
+ props->max_mr = dev->lk_table.max;
+ props->max_fmr = dev->lk_table.max;
+ props->max_map_per_fmr = 32767;
+ props->max_pd = ib_qib_max_pds;
+ props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
+ props->max_qp_init_rd_atom = 255;
+ /* props->max_res_rd_atom */
+ props->max_srq = ib_qib_max_srqs;
+ props->max_srq_wr = ib_qib_max_srq_wrs;
+ props->max_srq_sge = ib_qib_max_srq_sges;
+ /* props->local_ca_ack_delay */
+ props->atomic_cap = IB_ATOMIC_GLOB;
+ props->max_pkeys = qib_get_npkeys(dd);
+ props->max_mcast_grp = ib_qib_max_mcast_grps;
+ props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
+ props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
+ props->max_mcast_grp;
+
+ return 0;
+}
+
+static int qib_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ enum ib_mtu mtu;
+ u16 lid = ppd->lid;
+
+ memset(props, 0, sizeof(*props));
+ props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
+ props->lmc = ppd->lmc;
+ props->sm_lid = ibp->sm_lid;
+ props->sm_sl = ibp->sm_sl;
+ props->state = dd->f_iblink_state(ppd->lastibcstat);
+ props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
+ props->port_cap_flags = ibp->port_cap_flags;
+ props->gid_tbl_len = QIB_GUIDS_PER_PORT;
+ props->max_msg_sz = 0x80000000;
+ props->pkey_tbl_len = qib_get_npkeys(dd);
+ props->bad_pkey_cntr = ibp->pkey_violations;
+ props->qkey_viol_cntr = ibp->qkey_violations;
+ props->active_width = ppd->link_width_active;
+ /* See rate_show() */
+ props->active_speed = ppd->link_speed_active;
+ props->max_vl_num = qib_num_vls(ppd->vls_supported);
+ props->init_type_reply = 0;
+
+ props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
+ switch (ppd->ibmtu) {
+ case 4096:
+ mtu = IB_MTU_4096;
+ break;
+ case 2048:
+ mtu = IB_MTU_2048;
+ break;
+ case 1024:
+ mtu = IB_MTU_1024;
+ break;
+ case 512:
+ mtu = IB_MTU_512;
+ break;
+ case 256:
+ mtu = IB_MTU_256;
+ break;
+ default:
+ mtu = IB_MTU_2048;
+ }
+ props->active_mtu = mtu;
+ props->subnet_timeout = ibp->subnet_timeout;
+
+ return 0;
+}
+
+static int qib_modify_device(struct ib_device *device,
+ int device_modify_mask,
+ struct ib_device_modify *device_modify)
+{
+ struct qib_devdata *dd = dd_from_ibdev(device);
+ unsigned i;
+ int ret;
+
+ if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
+ IB_DEVICE_MODIFY_NODE_DESC)) {
+ ret = -EOPNOTSUPP;
+ goto bail;
+ }
+
+ if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
+ memcpy(device->node_desc, device_modify->node_desc, 64);
+ for (i = 0; i < dd->num_pports; i++) {
+ struct qib_ibport *ibp = &dd->pport[i].ibport_data;
+
+ qib_node_desc_chg(ibp);
+ }
+ }
+
+ if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
+ ib_qib_sys_image_guid =
+ cpu_to_be64(device_modify->sys_image_guid);
+ for (i = 0; i < dd->num_pports; i++) {
+ struct qib_ibport *ibp = &dd->pport[i].ibport_data;
+
+ qib_sys_guid_chg(ibp);
+ }
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+static int qib_modify_port(struct ib_device *ibdev, u8 port,
+ int port_modify_mask, struct ib_port_modify *props)
+{
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+ ibp->port_cap_flags |= props->set_port_cap_mask;
+ ibp->port_cap_flags &= ~props->clr_port_cap_mask;
+ if (props->set_port_cap_mask || props->clr_port_cap_mask)
+ qib_cap_mask_chg(ibp);
+ if (port_modify_mask & IB_PORT_SHUTDOWN)
+ qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
+ if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
+ ibp->qkey_violations = 0;
+ return 0;
+}
+
+static int qib_query_gid(struct ib_device *ibdev, u8 port,
+ int index, union ib_gid *gid)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ int ret = 0;
+
+ if (!port || port > dd->num_pports)
+ ret = -EINVAL;
+ else {
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+ gid->global.subnet_prefix = ibp->gid_prefix;
+ if (index == 0)
+ gid->global.interface_id = ppd->guid;
+ else if (index < QIB_GUIDS_PER_PORT)
+ gid->global.interface_id = ibp->guids[index - 1];
+ else
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_pd *pd;
+ struct ib_pd *ret;
+
+ /*
+ * This is actually totally arbitrary. Some correctness tests
+ * assume there's a maximum number of PDs that can be allocated.
+ * We don't actually have this limit, but we fail the test if
+ * we allow allocations of more than we report for this value.
+ */
+
+ pd = kmalloc(sizeof *pd, GFP_KERNEL);
+ if (!pd) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ spin_lock(&dev->n_pds_lock);
+ if (dev->n_pds_allocated == ib_qib_max_pds) {
+ spin_unlock(&dev->n_pds_lock);
+ kfree(pd);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ dev->n_pds_allocated++;
+ spin_unlock(&dev->n_pds_lock);
+
+ /* ib_alloc_pd() will initialize pd->ibpd. */
+ pd->user = udata != NULL;
+
+ ret = &pd->ibpd;
+
+bail:
+ return ret;
+}
+
+static int qib_dealloc_pd(struct ib_pd *ibpd)
+{
+ struct qib_pd *pd = to_ipd(ibpd);
+ struct qib_ibdev *dev = to_idev(ibpd->device);
+
+ spin_lock(&dev->n_pds_lock);
+ dev->n_pds_allocated--;
+ spin_unlock(&dev->n_pds_lock);
+
+ kfree(pd);
+
+ return 0;
+}
+
+int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
+{
+ /* A multicast address requires a GRH (see ch. 8.4.1). */
+ if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
+ ah_attr->dlid != QIB_PERMISSIVE_LID &&
+ !(ah_attr->ah_flags & IB_AH_GRH))
+ goto bail;
+ if ((ah_attr->ah_flags & IB_AH_GRH) &&
+ ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
+ goto bail;
+ if (ah_attr->dlid == 0)
+ goto bail;
+ if (ah_attr->port_num < 1 ||
+ ah_attr->port_num > ibdev->phys_port_cnt)
+ goto bail;
+ if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
+ ib_rate_to_mult(ah_attr->static_rate) < 0)
+ goto bail;
+ if (ah_attr->sl > 15)
+ goto bail;
+ return 0;
+bail:
+ return -EINVAL;
+}
+
+/**
+ * qib_create_ah - create an address handle
+ * @pd: the protection domain
+ * @ah_attr: the attributes of the AH
+ *
+ * This may be called from interrupt context.
+ */
+static struct ib_ah *qib_create_ah(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr)
+{
+ struct qib_ah *ah;
+ struct ib_ah *ret;
+ struct qib_ibdev *dev = to_idev(pd->device);
+ unsigned long flags;
+
+ if (qib_check_ah(pd->device, ah_attr)) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
+ ah = kmalloc(sizeof *ah, GFP_ATOMIC);
+ if (!ah) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ spin_lock_irqsave(&dev->n_ahs_lock, flags);
+ if (dev->n_ahs_allocated == ib_qib_max_ahs) {
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+ kfree(ah);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ dev->n_ahs_allocated++;
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+
+ /* ib_create_ah() will initialize ah->ibah. */
+ ah->attr = *ah_attr;
+ atomic_set(&ah->refcount, 0);
+
+ ret = &ah->ibah;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_destroy_ah - destroy an address handle
+ * @ibah: the AH to destroy
+ *
+ * This may be called from interrupt context.
+ */
+static int qib_destroy_ah(struct ib_ah *ibah)
+{
+ struct qib_ibdev *dev = to_idev(ibah->device);
+ struct qib_ah *ah = to_iah(ibah);
+ unsigned long flags;
+
+ if (atomic_read(&ah->refcount) != 0)
+ return -EBUSY;
+
+ spin_lock_irqsave(&dev->n_ahs_lock, flags);
+ dev->n_ahs_allocated--;
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+
+ kfree(ah);
+
+ return 0;
+}
+
+static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+ struct qib_ah *ah = to_iah(ibah);
+
+ if (qib_check_ah(ibah->device, ah_attr))
+ return -EINVAL;
+
+ ah->attr = *ah_attr;
+
+ return 0;
+}
+
+static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+ struct qib_ah *ah = to_iah(ibah);
+
+ *ah_attr = ah->attr;
+
+ return 0;
+}
+
+/**
+ * qib_get_npkeys - return the size of the PKEY table for context 0
+ * @dd: the qlogic_ib device
+ */
+unsigned qib_get_npkeys(struct qib_devdata *dd)
+{
+ return ARRAY_SIZE(dd->rcd[0]->pkeys);
+}
+
+/*
+ * Return the indexed PKEY from the port PKEY table.
+ * No need to validate rcd[ctxt]; the port is set up if we are here.
+ */
+unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
+{
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_devdata *dd = ppd->dd;
+ unsigned ctxt = ppd->hw_pidx;
+ unsigned ret;
+
+ /* dd->rcd null if mini_init or some init failures */
+ if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
+ ret = 0;
+ else
+ ret = dd->rcd[ctxt]->pkeys[index];
+
+ return ret;
+}
+
+static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ int ret;
+
+ if (index >= qib_get_npkeys(dd)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ *pkey = qib_get_pkey(to_iport(ibdev, port), index);
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_alloc_ucontext - allocate a ucontext
+ * @ibdev: the infiniband device
+ * @udata: not used by the QLogic_IB driver
+ */
+
+static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct qib_ucontext *context;
+ struct ib_ucontext *ret;
+
+ context = kmalloc(sizeof *context, GFP_KERNEL);
+ if (!context) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ ret = &context->ibucontext;
+
+bail:
+ return ret;
+}
+
+static int qib_dealloc_ucontext(struct ib_ucontext *context)
+{
+ kfree(to_iucontext(context));
+ return 0;
+}
+
+static void init_ibport(struct qib_pportdata *ppd)
+{
+ struct qib_verbs_counters cntrs;
+ struct qib_ibport *ibp = &ppd->ibport_data;
+
+ spin_lock_init(&ibp->lock);
+ /* Set the prefix to the default value (see ch. 4.1.1) */
+ ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
+ ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
+ ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
+ IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
+ IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
+ IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
+ IB_PORT_OTHER_LOCAL_CHANGES_SUP;
+ if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
+ ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
+ ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
+ ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
+ ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
+ ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
+ ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
+
+ /* Snapshot current HW counters to "clear" them. */
+ qib_get_counters(ppd, &cntrs);
+ ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
+ ibp->z_link_error_recovery_counter =
+ cntrs.link_error_recovery_counter;
+ ibp->z_link_downed_counter = cntrs.link_downed_counter;
+ ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
+ ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
+ ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
+ ibp->z_port_xmit_data = cntrs.port_xmit_data;
+ ibp->z_port_rcv_data = cntrs.port_rcv_data;
+ ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
+ ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
+ ibp->z_local_link_integrity_errors =
+ cntrs.local_link_integrity_errors;
+ ibp->z_excessive_buffer_overrun_errors =
+ cntrs.excessive_buffer_overrun_errors;
+ ibp->z_vl15_dropped = cntrs.vl15_dropped;
+}
+
+/**
+ * qib_register_ib_device - register our device with the infiniband core
+ * @dd: the device data structure
+ * Return 0 on success, a negative errno on failure.
+ */
+int qib_register_ib_device(struct qib_devdata *dd)
+{
+ struct qib_ibdev *dev = &dd->verbs_dev;
+ struct ib_device *ibdev = &dev->ibdev;
+ struct qib_pportdata *ppd = dd->pport;
+ unsigned i, lk_tab_size;
+ int ret;
+
+ dev->qp_table_size = ib_qib_qp_table_size;
+ dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table,
+ GFP_KERNEL);
+ if (!dev->qp_table) {
+ ret = -ENOMEM;
+ goto err_qpt;
+ }
+
+ for (i = 0; i < dd->num_pports; i++)
+ init_ibport(ppd + i);
+
+ /* Only need to initialize non-zero fields. */
+ spin_lock_init(&dev->qpt_lock);
+ spin_lock_init(&dev->n_pds_lock);
+ spin_lock_init(&dev->n_ahs_lock);
+ spin_lock_init(&dev->n_cqs_lock);
+ spin_lock_init(&dev->n_qps_lock);
+ spin_lock_init(&dev->n_srqs_lock);
+ spin_lock_init(&dev->n_mcast_grps_lock);
+ init_timer(&dev->mem_timer);
+ dev->mem_timer.function = mem_timer;
+ dev->mem_timer.data = (unsigned long) dev;
+
+ qib_init_qpn_table(dd, &dev->qpn_table);
+
+ /*
+ * The top ib_qib_lkey_table_size bits are used to index the
+ * table. The lower 8 bits can be owned by the user (copied from
+ * the LKEY). The remaining bits act as a generation number or tag.
+ */
+ spin_lock_init(&dev->lk_table.lock);
+ dev->lk_table.max = 1 << ib_qib_lkey_table_size;
+ lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+ dev->lk_table.table = (struct qib_mregion **)
+ __get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
+ if (dev->lk_table.table == NULL) {
+ ret = -ENOMEM;
+ goto err_lk;
+ }
+ memset(dev->lk_table.table, 0, lk_tab_size);
+ INIT_LIST_HEAD(&dev->pending_mmaps);
+ spin_lock_init(&dev->pending_lock);
+ dev->mmap_offset = PAGE_SIZE;
+ spin_lock_init(&dev->mmap_offset_lock);
+ INIT_LIST_HEAD(&dev->piowait);
+ INIT_LIST_HEAD(&dev->dmawait);
+ INIT_LIST_HEAD(&dev->txwait);
+ INIT_LIST_HEAD(&dev->memwait);
+ INIT_LIST_HEAD(&dev->txreq_free);
+
+ if (ppd->sdma_descq_cnt) {
+ dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
+ ppd->sdma_descq_cnt *
+ sizeof(struct qib_pio_header),
+ &dev->pio_hdrs_phys,
+ GFP_KERNEL);
+ if (!dev->pio_hdrs) {
+ ret = -ENOMEM;
+ goto err_hdrs;
+ }
+ }
+
+ for (i = 0; i < ppd->sdma_descq_cnt; i++) {
+ struct qib_verbs_txreq *tx;
+
+ tx = kzalloc(sizeof *tx, GFP_KERNEL);
+ if (!tx) {
+ ret = -ENOMEM;
+ goto err_tx;
+ }
+ tx->hdr_inx = i;
+ list_add(&tx->txreq.list, &dev->txreq_free);
+ }
+
+ /*
+ * The system image GUID is supposed to be the same for all
+ * IB HCAs in a single system but since there can be other
+ * device types in the system, we can't be sure this is unique.
+ */
+ if (!ib_qib_sys_image_guid)
+ ib_qib_sys_image_guid = ppd->guid;
+
+ strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
+ ibdev->owner = THIS_MODULE;
+ ibdev->node_guid = ppd->guid;
+ ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
+ ibdev->uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+ (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
+ ibdev->node_type = RDMA_NODE_IB_CA;
+ ibdev->phys_port_cnt = dd->num_pports;
+ ibdev->num_comp_vectors = 1;
+ ibdev->dma_device = &dd->pcidev->dev;
+ ibdev->query_device = qib_query_device;
+ ibdev->modify_device = qib_modify_device;
+ ibdev->query_port = qib_query_port;
+ ibdev->modify_port = qib_modify_port;
+ ibdev->query_pkey = qib_query_pkey;
+ ibdev->query_gid = qib_query_gid;
+ ibdev->alloc_ucontext = qib_alloc_ucontext;
+ ibdev->dealloc_ucontext = qib_dealloc_ucontext;
+ ibdev->alloc_pd = qib_alloc_pd;
+ ibdev->dealloc_pd = qib_dealloc_pd;
+ ibdev->create_ah = qib_create_ah;
+ ibdev->destroy_ah = qib_destroy_ah;
+ ibdev->modify_ah = qib_modify_ah;
+ ibdev->query_ah = qib_query_ah;
+ ibdev->create_srq = qib_create_srq;
+ ibdev->modify_srq = qib_modify_srq;
+ ibdev->query_srq = qib_query_srq;
+ ibdev->destroy_srq = qib_destroy_srq;
+ ibdev->create_qp = qib_create_qp;
+ ibdev->modify_qp = qib_modify_qp;
+ ibdev->query_qp = qib_query_qp;
+ ibdev->destroy_qp = qib_destroy_qp;
+ ibdev->post_send = qib_post_send;
+ ibdev->post_recv = qib_post_receive;
+ ibdev->post_srq_recv = qib_post_srq_receive;
+ ibdev->create_cq = qib_create_cq;
+ ibdev->destroy_cq = qib_destroy_cq;
+ ibdev->resize_cq = qib_resize_cq;
+ ibdev->poll_cq = qib_poll_cq;
+ ibdev->req_notify_cq = qib_req_notify_cq;
+ ibdev->get_dma_mr = qib_get_dma_mr;
+ ibdev->reg_phys_mr = qib_reg_phys_mr;
+ ibdev->reg_user_mr = qib_reg_user_mr;
+ ibdev->dereg_mr = qib_dereg_mr;
+ ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr;
+ ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
+ ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
+ ibdev->alloc_fmr = qib_alloc_fmr;
+ ibdev->map_phys_fmr = qib_map_phys_fmr;
+ ibdev->unmap_fmr = qib_unmap_fmr;
+ ibdev->dealloc_fmr = qib_dealloc_fmr;
+ ibdev->attach_mcast = qib_multicast_attach;
+ ibdev->detach_mcast = qib_multicast_detach;
+ ibdev->process_mad = qib_process_mad;
+ ibdev->mmap = qib_mmap;
+ ibdev->dma_ops = &qib_dma_mapping_ops;
+
+ snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
+ QIB_IDSTR " %s", init_utsname()->nodename);
+
+ ret = ib_register_device(ibdev, qib_create_port_files);
+ if (ret)
+ goto err_reg;
+
+ ret = qib_create_agents(dev);
+ if (ret)
+ goto err_agents;
+
+ if (qib_verbs_register_sysfs(dd))
+ goto err_class;
+
+ goto bail;
+
+err_class:
+ qib_free_agents(dev);
+err_agents:
+ ib_unregister_device(ibdev);
+err_reg:
+err_tx:
+ while (!list_empty(&dev->txreq_free)) {
+ struct list_head *l = dev->txreq_free.next;
+ struct qib_verbs_txreq *tx;
+
+ list_del(l);
+ tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+ kfree(tx);
+ }
+ if (ppd->sdma_descq_cnt)
+ dma_free_coherent(&dd->pcidev->dev,
+ ppd->sdma_descq_cnt *
+ sizeof(struct qib_pio_header),
+ dev->pio_hdrs, dev->pio_hdrs_phys);
+err_hdrs:
+ free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
+err_lk:
+ kfree(dev->qp_table);
+err_qpt:
+ qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
+bail:
+ return ret;
+}
+
+void qib_unregister_ib_device(struct qib_devdata *dd)
+{
+ struct qib_ibdev *dev = &dd->verbs_dev;
+ struct ib_device *ibdev = &dev->ibdev;
+ u32 qps_inuse;
+ unsigned lk_tab_size;
+
+ qib_verbs_unregister_sysfs(dd);
+
+ qib_free_agents(dev);
+
+ ib_unregister_device(ibdev);
+
+ if (!list_empty(&dev->piowait))
+ qib_dev_err(dd, "piowait list not empty!\n");
+ if (!list_empty(&dev->dmawait))
+ qib_dev_err(dd, "dmawait list not empty!\n");
+ if (!list_empty(&dev->txwait))
+ qib_dev_err(dd, "txwait list not empty!\n");
+ if (!list_empty(&dev->memwait))
+ qib_dev_err(dd, "memwait list not empty!\n");
+ if (dev->dma_mr)
+ qib_dev_err(dd, "DMA MR not NULL!\n");
+
+ qps_inuse = qib_free_all_qps(dd);
+ if (qps_inuse)
+ qib_dev_err(dd, "QP memory leak! %u still in use\n",
+ qps_inuse);
+
+ del_timer_sync(&dev->mem_timer);
+ qib_free_qpn_table(&dev->qpn_table);
+ while (!list_empty(&dev->txreq_free)) {
+ struct list_head *l = dev->txreq_free.next;
+ struct qib_verbs_txreq *tx;
+
+ list_del(l);
+ tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+ kfree(tx);
+ }
+ if (dd->pport->sdma_descq_cnt)
+ dma_free_coherent(&dd->pcidev->dev,
+ dd->pport->sdma_descq_cnt *
+ sizeof(struct qib_pio_header),
+ dev->pio_hdrs, dev->pio_hdrs_phys);
+ lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+ free_pages((unsigned long) dev->lk_table.table,
+ get_order(lk_tab_size));
+ kfree(dev->qp_table);
+}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
new file mode 100644
index 000000000000..bd57c1273225
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -0,0 +1,1100 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef QIB_VERBS_H
+#define QIB_VERBS_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
+#include <rdma/ib_pack.h>
+#include <rdma/ib_user_verbs.h>
+
+struct qib_ctxtdata;
+struct qib_pportdata;
+struct qib_devdata;
+struct qib_verbs_txreq;
+
+#define QIB_MAX_RDMA_ATOMIC 16
+#define QIB_GUIDS_PER_PORT 5
+
+#define QPN_MAX (1 << 24)
+#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
+
+/*
+ * Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+#define QIB_UVERBS_ABI_VERSION 2
+
+/*
+ * Define an ib_cq_notify value that is not valid so we know when CQ
+ * notifications are armed.
+ */
+#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)
+
+#define IB_SEQ_NAK (3 << 29)
+
+/* AETH NAK opcode values */
+#define IB_RNR_NAK 0x20
+#define IB_NAK_PSN_ERROR 0x60
+#define IB_NAK_INVALID_REQUEST 0x61
+#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
+#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
+#define IB_NAK_INVALID_RD_REQUEST 0x64
+
+/* Flags for checking QP state (see ib_qib_state_ops[]) */
+#define QIB_POST_SEND_OK 0x01
+#define QIB_POST_RECV_OK 0x02
+#define QIB_PROCESS_RECV_OK 0x04
+#define QIB_PROCESS_SEND_OK 0x08
+#define QIB_PROCESS_NEXT_SEND_OK 0x10
+#define QIB_FLUSH_SEND 0x20
+#define QIB_FLUSH_RECV 0x40
+#define QIB_PROCESS_OR_FLUSH_SEND \
+ (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)
+
+/* IB Performance Manager status values */
+#define IB_PMA_SAMPLE_STATUS_DONE 0x00
+#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
+#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02
+
+/* Mandatory IB performance counter select values. */
+#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)
+#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)
+#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)
+#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)
+#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)
+
+#define QIB_VENDOR_IPG cpu_to_be16(0xFFA0)
+
+#define IB_BTH_REQ_ACK (1 << 31)
+#define IB_BTH_SOLICITED (1 << 23)
+#define IB_BTH_MIG_REQ (1 << 22)
+
+/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
+#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)
+
+#define IB_GRH_VERSION 6
+#define IB_GRH_VERSION_MASK 0xF
+#define IB_GRH_VERSION_SHIFT 28
+#define IB_GRH_TCLASS_MASK 0xFF
+#define IB_GRH_TCLASS_SHIFT 20
+#define IB_GRH_FLOW_MASK 0xFFFFF
+#define IB_GRH_FLOW_SHIFT 0
+#define IB_GRH_NEXT_HDR 0x1B
+
+#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
+
+/* Values for set/get portinfo VLCap OperationalVLs */
+#define IB_VL_VL0 1
+#define IB_VL_VL0_1 2
+#define IB_VL_VL0_3 3
+#define IB_VL_VL0_7 4
+#define IB_VL_VL0_14 5
+
+static inline int qib_num_vls(int vls)
+{
+ switch (vls) {
+ default:
+ case IB_VL_VL0:
+ return 1;
+ case IB_VL_VL0_1:
+ return 2;
+ case IB_VL_VL0_3:
+ return 4;
+ case IB_VL_VL0_7:
+ return 8;
+ case IB_VL_VL0_14:
+ return 15;
+ }
+}
+
+struct ib_reth {
+ __be64 vaddr;
+ __be32 rkey;
+ __be32 length;
+} __attribute__ ((packed));
+
+struct ib_atomic_eth {
+ __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
+ __be32 rkey;
+ __be64 swap_data;
+ __be64 compare_data;
+} __attribute__ ((packed));
+
+struct qib_other_headers {
+ __be32 bth[3];
+ union {
+ struct {
+ __be32 deth[2];
+ __be32 imm_data;
+ } ud;
+ struct {
+ struct ib_reth reth;
+ __be32 imm_data;
+ } rc;
+ struct {
+ __be32 aeth;
+ __be32 atomic_ack_eth[2];
+ } at;
+ __be32 imm_data;
+ __be32 aeth;
+ struct ib_atomic_eth atomic_eth;
+ } u;
+} __attribute__ ((packed));
+
+/*
+ * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
+ * long (72 w/ imm_data). Only the first 56 bytes of the IB header
+ * will be in the eager header buffer. The remaining 12 or 16 bytes
+ * are in the data buffer.
+ */
+struct qib_ib_header {
+ __be16 lrh[4];
+ union {
+ struct {
+ struct ib_grh grh;
+ struct qib_other_headers oth;
+ } l;
+ struct qib_other_headers oth;
+ } u;
+} __attribute__ ((packed));
+
+struct qib_pio_header {
+ __le32 pbc[2];
+ struct qib_ib_header hdr;
+} __attribute__ ((packed));
+
+/*
+ * There is one struct qib_mcast for each multicast GID.
+ * All attached QPs are then stored as a list of
+ * struct qib_mcast_qp.
+ */
+struct qib_mcast_qp {
+ struct list_head list;
+ struct qib_qp *qp;
+};
+
+struct qib_mcast {
+ struct rb_node rb_node;
+ union ib_gid mgid;
+ struct list_head qp_list;
+ wait_queue_head_t wait;
+ atomic_t refcount;
+ int n_attached;
+};
+
+/* Protection domain */
+struct qib_pd {
+ struct ib_pd ibpd;
+ int user; /* non-zero if created from user space */
+};
+
+/* Address Handle */
+struct qib_ah {
+ struct ib_ah ibah;
+ struct ib_ah_attr attr;
+ atomic_t refcount;
+};
+
+/*
+ * This structure is used by qib_mmap() to validate an offset
+ * when an mmap() request is made. The vm_area_struct then uses
+ * this as its vm_private_data.
+ */
+struct qib_mmap_info {
+ struct list_head pending_mmaps;
+ struct ib_ucontext *context;
+ void *obj;
+ __u64 offset;
+ struct kref ref;
+ unsigned size;
+};
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and completion queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ */
+struct qib_cq_wc {
+ u32 head; /* index of next entry to fill */
+ u32 tail; /* index of next ib_poll_cq() entry */
+ union {
+ /* these are actually size ibcq.cqe + 1 */
+ struct ib_uverbs_wc uqueue[0];
+ struct ib_wc kqueue[0];
+ };
+};
+
+/*
+ * The completion queue structure.
+ */
+struct qib_cq {
+ struct ib_cq ibcq;
+ struct work_struct comptask;
+ spinlock_t lock; /* protect changes in this struct */
+ u8 notify;
+ u8 triggered;
+ struct qib_cq_wc *queue;
+ struct qib_mmap_info *ip;
+};
+
+/*
+ * A segment is a linear region of low physical memory.
+ * XXX Maybe we should use phys addr here and kmap()/kunmap().
+ * Used by the verbs layer.
+ */
+struct qib_seg {
+ void *vaddr;
+ size_t length;
+};
+
+/* The number of qib_segs that fit in a page. */
+#define QIB_SEGSZ (PAGE_SIZE / sizeof(struct qib_seg))
+
+struct qib_segarray {
+ struct qib_seg segs[QIB_SEGSZ];
+};
+
+struct qib_mregion {
+ struct ib_pd *pd; /* shares refcnt of ibmr.pd */
+ u64 user_base; /* User's address for this region */
+ u64 iova; /* IB start address of this region */
+ size_t length;
+ u32 lkey;
+ u32 offset; /* offset (bytes) to start of region */
+ int access_flags;
+ u32 max_segs; /* number of qib_segs in all the arrays */
+ u32 mapsz; /* size of the map array */
+ atomic_t refcount;
+ struct qib_segarray *map[0]; /* the segments */
+};
+
+/*
+ * These keep track of the copy progress within a memory region.
+ * Used by the verbs layer.
+ */
+struct qib_sge {
+ struct qib_mregion *mr;
+ void *vaddr; /* kernel virtual address of segment */
+ u32 sge_length; /* length of the SGE */
+ u32 length; /* remaining length of the segment */
+ u16 m; /* current index: mr->map[m] */
+ u16 n; /* current index: mr->map[m]->segs[n] */
+};
+
+/* Memory region */
+struct qib_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct qib_mregion mr; /* must be last */
+};
+
+/*
+ * Send work request queue entry.
+ * The size of the sg_list is determined when the QP is created and stored
+ * in qp->s_max_sge.
+ */
+struct qib_swqe {
+ struct ib_send_wr wr; /* don't use wr.sg_list */
+ u32 psn; /* first packet sequence number */
+ u32 lpsn; /* last packet sequence number */
+ u32 ssn; /* send sequence number */
+ u32 length; /* total length of data in sg_list */
+ struct qib_sge sg_list[0];
+};
+
+/*
+ * Receive work request queue entry.
+ * The size of the sg_list is determined when the QP (or SRQ) is created
+ * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
+ */
+struct qib_rwqe {
+ u64 wr_id;
+ u8 num_sge;
+ struct ib_sge sg_list[0];
+};
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and receive work queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ * Note that the wq array elements are variable size so you can't
+ * just index into the array to get the N'th element;
+ * use get_rwqe_ptr() instead.
+ */
+struct qib_rwq {
+ u32 head; /* new work requests posted to the head */
+ u32 tail; /* receives pull requests from here. */
+ struct qib_rwqe wq[0];
+};
+
+struct qib_rq {
+ struct qib_rwq *wq;
+ spinlock_t lock; /* protect changes in this struct */
+ u32 size; /* size of RWQE array */
+ u8 max_sge;
+};
+
+struct qib_srq {
+ struct ib_srq ibsrq;
+ struct qib_rq rq;
+ struct qib_mmap_info *ip;
+ /* send signal when number of RWQEs < limit */
+ u32 limit;
+};
+
+struct qib_sge_state {
+ struct qib_sge *sg_list; /* next SGE to be used if any */
+ struct qib_sge sge; /* progress state for the current SGE */
+ u32 total_len;
+ u8 num_sge;
+};
+
+/*
+ * This structure holds the information that the send tasklet needs
+ * to send a RDMA read response or atomic operation.
+ */
+struct qib_ack_entry {
+ u8 opcode;
+ u8 sent;
+ u32 psn;
+ u32 lpsn;
+ union {
+ struct qib_sge rdma_sge;
+ u64 atomic_data;
+ };
+};
+
+/*
+ * Variables prefixed with s_ are for the requester (sender).
+ * Variables prefixed with r_ are for the responder (receiver).
+ * Variables prefixed with ack_ are for responder replies.
+ *
+ * Common variables are protected by both r_rq.lock and s_lock, taken in
+ * that order; this only happens in modify_qp() or when changing the QP
+ * 'state'.
+ */
+struct qib_qp {
+ struct ib_qp ibqp;
+ struct qib_qp *next; /* linked list for QPN hash table */
+ struct qib_qp *timer_next; /* linked list for qib_ib_timer() */
+ struct list_head iowait; /* link for waiting on a PIO buf */
+ struct list_head rspwait; /* link for waiting to respond */
+ struct ib_ah_attr remote_ah_attr;
+ struct ib_ah_attr alt_ah_attr;
+ struct qib_ib_header s_hdr; /* next packet header to send */
+ atomic_t refcount;
+ wait_queue_head_t wait;
+ wait_queue_head_t wait_dma;
+ struct timer_list s_timer;
+ struct work_struct s_work;
+ struct qib_mmap_info *ip;
+ struct qib_sge_state *s_cur_sge;
+ struct qib_verbs_txreq *s_tx;
+ struct qib_mregion *s_rdma_mr;
+ struct qib_sge_state s_sge; /* current send request data */
+ struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1];
+ struct qib_sge_state s_ack_rdma_sge;
+ struct qib_sge_state s_rdma_read_sge;
+ struct qib_sge_state r_sge; /* current receive data */
+ spinlock_t r_lock; /* used for APM */
+ spinlock_t s_lock;
+ atomic_t s_dma_busy;
+ unsigned processor_id; /* Processor ID QP is bound to */
+ u32 s_flags;
+ u32 s_cur_size; /* size of send packet in bytes */
+ u32 s_len; /* total length of s_sge */
+ u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
+ u32 s_next_psn; /* PSN for next request */
+ u32 s_last_psn; /* last response PSN processed */
+ u32 s_sending_psn; /* lowest PSN that is being sent */
+ u32 s_sending_hpsn; /* highest PSN that is being sent */
+ u32 s_psn; /* current packet sequence number */
+ u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
+ u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
+ u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
+ u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
+ u64 r_wr_id; /* ID for current receive WQE */
+ unsigned long r_aflags;
+ u32 r_len; /* total length of r_sge */
+ u32 r_rcv_len; /* receive data len processed */
+ u32 r_psn; /* expected rcv packet sequence number */
+ u32 r_msn; /* message sequence number */
+ u16 s_hdrwords; /* size of s_hdr in 32 bit words */
+ u16 s_rdma_ack_cnt;
+ u8 state; /* QP state */
+ u8 s_state; /* opcode of last packet sent */
+ u8 s_ack_state; /* opcode of packet to ACK */
+ u8 s_nak_state; /* non-zero if NAK is pending */
+ u8 r_state; /* opcode of last packet received */
+ u8 r_nak_state; /* non-zero if NAK is pending */
+ u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
+ u8 r_flags;
+ u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
+ u8 r_head_ack_queue; /* index into s_ack_queue[] */
+ u8 qp_access_flags;
+ u8 s_max_sge; /* size of s_wq->sg_list */
+ u8 s_retry_cnt; /* number of times to retry */
+ u8 s_rnr_retry_cnt;
+ u8 s_retry; /* requester retry counter */
+ u8 s_rnr_retry; /* requester RNR retry counter */
+ u8 s_pkey_index; /* PKEY index to use */
+ u8 s_alt_pkey_index; /* Alternate path PKEY index to use */
+ u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
+ u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
+ u8 s_tail_ack_queue; /* index into s_ack_queue[] */
+ u8 s_srate;
+ u8 s_draining;
+ u8 s_mig_state;
+ u8 timeout; /* Timeout for this QP */
+ u8 alt_timeout; /* Alternate path timeout for this QP */
+ u8 port_num;
+ enum ib_mtu path_mtu;
+ u32 remote_qpn;
+ u32 qkey; /* QKEY for this QP (for UD or RD) */
+ u32 s_size; /* send work queue size */
+ u32 s_head; /* new entries added here */
+ u32 s_tail; /* next entry to process */
+ u32 s_cur; /* current work queue entry */
+ u32 s_acked; /* last un-ACK'ed entry */
+ u32 s_last; /* last completed entry */
+ u32 s_ssn; /* SSN of tail entry */
+ u32 s_lsn; /* limit sequence number (credit) */
+ struct qib_swqe *s_wq; /* send work queue */
+ struct qib_swqe *s_wqe;
+ struct qib_rq r_rq; /* receive work queue */
+ struct qib_sge r_sg_list[0]; /* verified SGEs */
+};
+
+/*
+ * Atomic bit definitions for r_aflags.
+ */
+#define QIB_R_WRID_VALID 0
+#define QIB_R_REWIND_SGE 1
+
+/*
+ * Bit definitions for r_flags.
+ */
+#define QIB_R_REUSE_SGE 0x01
+#define QIB_R_RDMAR_SEQ 0x02
+#define QIB_R_RSP_NAK 0x04
+#define QIB_R_RSP_SEND 0x08
+#define QIB_R_COMM_EST 0x10
+
+/*
+ * Bit definitions for s_flags.
+ *
+ * QIB_S_SIGNAL_REQ_WR - set if completions are signaled only for send WRs that request it
+ * QIB_S_BUSY - send tasklet is processing the QP
+ * QIB_S_TIMER - the RC retry timer is active
+ * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
+ * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
+ * before processing the next SWQE
+ * QIB_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
+ * before processing the next SWQE
+ * QIB_S_WAIT_RNR - waiting for RNR timeout
+ * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
+ * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
+ * next send completion entry not via send DMA
+ * QIB_S_WAIT_PIO - waiting for a send buffer to be available
+ * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
+ * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
+ * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
+ * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
+ * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
+ * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
+ */
+#define QIB_S_SIGNAL_REQ_WR 0x0001
+#define QIB_S_BUSY 0x0002
+#define QIB_S_TIMER 0x0004
+#define QIB_S_RESP_PENDING 0x0008
+#define QIB_S_ACK_PENDING 0x0010
+#define QIB_S_WAIT_FENCE 0x0020
+#define QIB_S_WAIT_RDMAR 0x0040
+#define QIB_S_WAIT_RNR 0x0080
+#define QIB_S_WAIT_SSN_CREDIT 0x0100
+#define QIB_S_WAIT_DMA 0x0200
+#define QIB_S_WAIT_PIO 0x0400
+#define QIB_S_WAIT_TX 0x0800
+#define QIB_S_WAIT_DMA_DESC 0x1000
+#define QIB_S_WAIT_KMEM 0x2000
+#define QIB_S_WAIT_PSN 0x4000
+#define QIB_S_WAIT_ACK 0x8000
+#define QIB_S_SEND_ONE 0x10000
+#define QIB_S_UNLIMITED_CREDIT 0x20000
+
+/*
+ * Wait flags that would prevent any packet type from being sent.
+ */
+#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
+ QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)
+
+/*
+ * Wait flags that would prevent send work requests from making progress.
+ */
+#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
+ QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
+ QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)
+
+#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)
+
+#define QIB_PSN_CREDIT 16
+
+/*
+ * Since struct qib_swqe is not a fixed size, we can't simply index into
+ * struct qib_qp.s_wq. This function does the array index computation.
+ */
+static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
+ unsigned n)
+{
+ return (struct qib_swqe *)((char *)qp->s_wq +
+ (sizeof(struct qib_swqe) +
+ qp->s_max_sge *
+ sizeof(struct qib_sge)) * n);
+}
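
For illustration, a minimal standalone sketch of the same variable-stride indexing; the struct and function names below are hypothetical simplifications, not taken from this driver:

    #include <stddef.h>

    struct example_sge  { unsigned long addr; unsigned length; };
    struct example_swqe { unsigned opcode; struct example_sge sg_list[]; };

    /* Each entry occupies sizeof(header) plus max_sge trailing SGEs, so
     * entries are located by byte stride rather than plain array indexing. */
    static struct example_swqe *example_swqe_ptr(char *wq, unsigned max_sge,
                                                 unsigned n)
    {
        size_t stride = sizeof(struct example_swqe) +
                        max_sge * sizeof(struct example_sge);
        return (struct example_swqe *)(wq + stride * n);
    }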
+
+/*
+ * Since struct qib_rwqe is not a fixed size, we can't simply index into
+ * struct qib_rwq.wq. This function does the array index computation.
+ */
+static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
+{
+ return (struct qib_rwqe *)
+ ((char *) rq->wq->wq +
+ (sizeof(struct qib_rwqe) +
+ rq->max_sge * sizeof(struct ib_sge)) * n);
+}
+
+/*
+ * QPN-map pages start out as NULL; they are allocated upon
+ * first use and never deallocated. This way,
+ * large bitmaps are not allocated unless large numbers of QPs are used.
+ */
+struct qpn_map {
+ void *page;
+};
+
+struct qib_qpn_table {
+ spinlock_t lock; /* protect changes in this struct */
+ unsigned flags; /* flags for QP0/1 allocated for each port */
+ u32 last; /* last QP number allocated */
+ u32 nmaps; /* size of the map table */
+ u16 limit;
+ u16 mask;
+ /* bit map of free QP numbers other than 0/1 */
+ struct qpn_map map[QPNMAP_ENTRIES];
+};
+
+struct qib_lkey_table {
+ spinlock_t lock; /* protect changes in this struct */
+ u32 next; /* next unused index (speeds search) */
+ u32 gen; /* generation count */
+ u32 max; /* size of the table */
+ struct qib_mregion **table;
+};
+
+struct qib_opcode_stats {
+ u64 n_packets; /* number of packets */
+ u64 n_bytes; /* total number of bytes */
+};
+
+struct qib_ibport {
+ struct qib_qp *qp0;
+ struct qib_qp *qp1;
+ struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
+ struct qib_ah *sm_ah;
+ struct qib_ah *smi_ah;
+ struct rb_root mcast_tree;
+ spinlock_t lock; /* protect changes in this struct */
+
+ /* non-zero when timer is set */
+ unsigned long mkey_lease_timeout;
+ unsigned long trap_timeout;
+ __be64 gid_prefix; /* in network order */
+ __be64 mkey;
+ __be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */
+ u64 tid; /* TID for traps */
+ u64 n_unicast_xmit; /* total unicast packets sent */
+ u64 n_unicast_rcv; /* total unicast packets received */
+ u64 n_multicast_xmit; /* total multicast packets sent */
+ u64 n_multicast_rcv; /* total multicast packets received */
+ u64 z_symbol_error_counter; /* starting count for PMA */
+ u64 z_link_error_recovery_counter; /* starting count for PMA */
+ u64 z_link_downed_counter; /* starting count for PMA */
+ u64 z_port_rcv_errors; /* starting count for PMA */
+ u64 z_port_rcv_remphys_errors; /* starting count for PMA */
+ u64 z_port_xmit_discards; /* starting count for PMA */
+ u64 z_port_xmit_data; /* starting count for PMA */
+ u64 z_port_rcv_data; /* starting count for PMA */
+ u64 z_port_xmit_packets; /* starting count for PMA */
+ u64 z_port_rcv_packets; /* starting count for PMA */
+ u32 z_local_link_integrity_errors; /* starting count for PMA */
+ u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */
+ u32 z_vl15_dropped; /* starting count for PMA */
+ u32 n_rc_resends;
+ u32 n_rc_acks;
+ u32 n_rc_qacks;
+ u32 n_rc_delayed_comp;
+ u32 n_seq_naks;
+ u32 n_rdma_seq;
+ u32 n_rnr_naks;
+ u32 n_other_naks;
+ u32 n_loop_pkts;
+ u32 n_pkt_drops;
+ u32 n_vl15_dropped;
+ u32 n_rc_timeouts;
+ u32 n_dmawait;
+ u32 n_unaligned;
+ u32 n_rc_dupreq;
+ u32 n_rc_seqnak;
+ u32 port_cap_flags;
+ u32 pma_sample_start;
+ u32 pma_sample_interval;
+ __be16 pma_counter_select[5];
+ u16 pma_tag;
+ u16 pkey_violations;
+ u16 qkey_violations;
+ u16 mkey_violations;
+ u16 mkey_lease_period;
+ u16 sm_lid;
+ u16 repress_traps;
+ u8 sm_sl;
+ u8 mkeyprot;
+ u8 subnet_timeout;
+ u8 vl_high_limit;
+ u8 sl_to_vl[16];
+
+ struct qib_opcode_stats opstats[128];
+};
+
+struct qib_ibdev {
+ struct ib_device ibdev;
+ struct list_head pending_mmaps;
+ spinlock_t mmap_offset_lock; /* protect mmap_offset */
+ u32 mmap_offset;
+ struct qib_mregion *dma_mr;
+
+ /* QP numbers are shared by all IB ports */
+ struct qib_qpn_table qpn_table;
+ struct qib_lkey_table lk_table;
+ struct list_head piowait; /* list for wait PIO buf */
+ struct list_head dmawait; /* list for wait DMA */
+ struct list_head txwait; /* list for wait qib_verbs_txreq */
+ struct list_head memwait; /* list for wait kernel memory */
+ struct list_head txreq_free;
+ struct timer_list mem_timer;
+ struct qib_qp **qp_table;
+ struct qib_pio_header *pio_hdrs;
+ dma_addr_t pio_hdrs_phys;
+ /* list of QPs waiting for RNR timer */
+ spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
+ unsigned qp_table_size; /* size of the hash table */
+ spinlock_t qpt_lock;
+
+ u32 n_piowait;
+ u32 n_txwait;
+
+ u32 n_pds_allocated; /* number of PDs allocated for device */
+ spinlock_t n_pds_lock;
+ u32 n_ahs_allocated; /* number of AHs allocated for device */
+ spinlock_t n_ahs_lock;
+ u32 n_cqs_allocated; /* number of CQs allocated for device */
+ spinlock_t n_cqs_lock;
+ u32 n_qps_allocated; /* number of QPs allocated for device */
+ spinlock_t n_qps_lock;
+ u32 n_srqs_allocated; /* number of SRQs allocated for device */
+ spinlock_t n_srqs_lock;
+ u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
+ spinlock_t n_mcast_grps_lock;
+};
+
+struct qib_verbs_counters {
+ u64 symbol_error_counter;
+ u64 link_error_recovery_counter;
+ u64 link_downed_counter;
+ u64 port_rcv_errors;
+ u64 port_rcv_remphys_errors;
+ u64 port_xmit_discards;
+ u64 port_xmit_data;
+ u64 port_rcv_data;
+ u64 port_xmit_packets;
+ u64 port_rcv_packets;
+ u32 local_link_integrity_errors;
+ u32 excessive_buffer_overrun_errors;
+ u32 vl15_dropped;
+};
+
+static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct qib_mr, ibmr);
+}
+
+static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct qib_pd, ibpd);
+}
+
+static inline struct qib_ah *to_iah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct qib_ah, ibah);
+}
+
+static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct qib_cq, ibcq);
+}
+
+static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct qib_srq, ibsrq);
+}
+
+static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct qib_qp, ibqp);
+}
+
+static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct qib_ibdev, ibdev);
+}
+
+/*
+ * Send if not busy or waiting for I/O, and either
+ * an RC response is pending or we can process send work requests.
+ */
+static inline int qib_send_ok(struct qib_qp *qp)
+{
+ return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
+ (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
+ !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
+}
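
For illustration, two sample evaluations using the flag values defined above (chosen arbitrarily, not from a trace):

    /* s_flags == QIB_S_WAIT_RNR, s_hdrwords == 0:
     *   not busy or waiting for I/O, but nothing is queued to send and an
     *   ANY_WAIT_SEND bit is set, so qib_send_ok() returns 0.
     * s_flags == QIB_S_RESP_PENDING, s_hdrwords == 0:
     *   a response is pending and no wait bits are set, so it returns nonzero. */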
+
+extern struct workqueue_struct *qib_wq;
+extern struct workqueue_struct *qib_cq_wq;
+
+/*
+ * This must be called with s_lock held.
+ */
+static inline void qib_schedule_send(struct qib_qp *qp)
+{
+ if (qib_send_ok(qp)) {
+ if (qp->processor_id == smp_processor_id())
+ queue_work(qib_wq, &qp->s_work);
+ else
+ queue_work_on(qp->processor_id,
+ qib_wq, &qp->s_work);
+ }
+}
+
+static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
+{
+ u16 p1 = pkey1 & 0x7FFF;
+ u16 p2 = pkey2 & 0x7FFF;
+
+ /*
+ * Low 15 bits must be non-zero and match, and
+ * one of the two must be a full member.
+ */
+ return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
+}
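
For illustration, with made-up P_Key values:

    /* qib_pkey_ok(0x8001, 0x0001) -> nonzero: low 15 bits match, 0x8001 is a full member
     * qib_pkey_ok(0x0001, 0x0001) -> 0:       both keys are limited members
     * qib_pkey_ok(0x8001, 0x8002) -> 0:       low 15 bits differ */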
+
+void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
+ u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
+void qib_cap_mask_chg(struct qib_ibport *ibp);
+void qib_sys_guid_chg(struct qib_ibport *ibp);
+void qib_node_desc_chg(struct qib_ibport *ibp);
+int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad);
+int qib_create_agents(struct qib_ibdev *dev);
+void qib_free_agents(struct qib_ibdev *dev);
+
+/*
+ * Compare the lower 24 bits of the two values.
+ * Returns an integer less than, equal to, or greater than zero.
+ */
+static inline int qib_cmp24(u32 a, u32 b)
+{
+ return (((int) a) - ((int) b)) << 8;
+}
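
A worked example (values chosen for illustration): the left shift by 8 moves bit 23 of the difference into the sign bit, so the comparison wraps correctly across the 24-bit PSN space:

    /* qib_cmp24(0xFFFFFF, 0x000001):
     *   (int)a - (int)b = 0x00FFFFFE; shifted left by 8 = 0xFFFFFE00 (negative),
     *   so 0xFFFFFF compares as "just behind" 0x000001 modulo 2^24.
     * qib_cmp24(5, 5) == 0 and qib_cmp24(6, 5) > 0, as expected. */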
+
+struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);
+
+int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
+ u64 *rwords, u64 *spkts, u64 *rpkts,
+ u64 *xmit_wait);
+
+int qib_get_counters(struct qib_pportdata *ppd,
+ struct qib_verbs_counters *cntrs);
+
+int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+
+int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+
+int qib_mcast_tree_empty(struct qib_ibport *ibp);
+
+__be32 qib_compute_aeth(struct qib_qp *qp);
+
+struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);
+
+struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+
+int qib_destroy_qp(struct ib_qp *ibqp);
+
+int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);
+
+int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+
+int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr);
+
+unsigned qib_free_all_qps(struct qib_devdata *dd);
+
+void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);
+
+void qib_free_qpn_table(struct qib_qpn_table *qpt);
+
+void qib_get_credit(struct qib_qp *qp, u32 aeth);
+
+unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);
+
+void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);
+
+void qib_put_txreq(struct qib_verbs_txreq *tx);
+
+int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
+ u32 hdrwords, struct qib_sge_state *ss, u32 len);
+
+void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
+ int release);
+
+void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);
+
+void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp);
+
+void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp);
+
+int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
+
+void qib_rc_rnr_retry(unsigned long arg);
+
+void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);
+
+void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);
+
+int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);
+
+void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp);
+
+int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr);
+
+int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr);
+
+int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
+ struct qib_sge *isge, struct ib_sge *sge, int acc);
+
+int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
+ u32 len, u64 vaddr, u32 rkey, int acc);
+
+int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+
+struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
+
+int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata);
+
+int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
+
+int qib_destroy_srq(struct ib_srq *ibsrq);
+
+void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);
+
+int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
+
+struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
+ int comp_vector, struct ib_ucontext *context,
+ struct ib_udata *udata);
+
+int qib_destroy_cq(struct ib_cq *ibcq);
+
+int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
+
+int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
+
+struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);
+
+struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf, int acc, u64 *iova_start);
+
+struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata);
+
+int qib_dereg_mr(struct ib_mr *ibmr);
+
+struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
+
+struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
+ struct ib_device *ibdev, int page_list_len);
+
+void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);
+
+int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);
+
+struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr);
+
+int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+ int list_len, u64 iova);
+
+int qib_unmap_fmr(struct list_head *fmr_list);
+
+int qib_dealloc_fmr(struct ib_fmr *ibfmr);
+
+void qib_release_mmap_info(struct kref *ref);
+
+struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
+ struct ib_ucontext *context,
+ void *obj);
+
+void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
+ u32 size, void *obj);
+
+int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);
+
+void qib_migrate_qp(struct qib_qp *qp);
+
+int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, struct qib_qp *qp, u32 bth0);
+
+u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
+ struct ib_global_route *grh, u32 hwords, u32 nwords);
+
+void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
+ u32 bth0, u32 bth2);
+
+void qib_do_send(struct work_struct *work);
+
+void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
+ enum ib_wc_status status);
+
+void qib_send_rc_ack(struct qib_qp *qp);
+
+int qib_make_rc_req(struct qib_qp *qp);
+
+int qib_make_uc_req(struct qib_qp *qp);
+
+int qib_make_ud_req(struct qib_qp *qp);
+
+int qib_register_ib_device(struct qib_devdata *);
+
+void qib_unregister_ib_device(struct qib_devdata *);
+
+void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);
+
+void qib_ib_piobufavail(struct qib_devdata *);
+
+unsigned qib_get_npkeys(struct qib_devdata *);
+
+unsigned qib_get_pkey(struct qib_ibport *, unsigned);
+
+extern const enum ib_wc_opcode ib_qib_wc_opcode[];
+
+/*
+ * Below are the HCA-independent IB PhysPortState values, returned
+ * by the f_ibphys_portstate() routine.
+ */
+#define IB_PHYSPORTSTATE_SLEEP 1
+#define IB_PHYSPORTSTATE_POLL 2
+#define IB_PHYSPORTSTATE_DISABLED 3
+#define IB_PHYSPORTSTATE_CFG_TRAIN 4
+#define IB_PHYSPORTSTATE_LINKUP 5
+#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
+#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
+#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
+#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
+#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
+#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
+#define IB_PHYSPORTSTATE_CFG_ENH 0x10
+#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13
+
+extern const int ib_qib_state_ops[];
+
+extern __be64 ib_qib_sys_image_guid; /* in network order */
+
+extern unsigned int ib_qib_lkey_table_size;
+
+extern unsigned int ib_qib_max_cqes;
+
+extern unsigned int ib_qib_max_cqs;
+
+extern unsigned int ib_qib_max_qp_wrs;
+
+extern unsigned int ib_qib_max_qps;
+
+extern unsigned int ib_qib_max_sges;
+
+extern unsigned int ib_qib_max_mcast_grps;
+
+extern unsigned int ib_qib_max_mcast_qp_attached;
+
+extern unsigned int ib_qib_max_srqs;
+
+extern unsigned int ib_qib_max_srq_sges;
+
+extern unsigned int ib_qib_max_srq_wrs;
+
+extern const u32 ib_qib_rnr_table[];
+
+extern struct ib_dma_mapping_ops qib_dma_mapping_ops;
+
+#endif /* QIB_VERBS_H */
diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
new file mode 100644
index 000000000000..dabb697b1c2a
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/rculist.h>
+
+#include "qib.h"
+
+/**
+ * qib_mcast_qp_alloc - allocate a struct linking a QP to a multicast GID struct
+ * @qp: the QP to link
+ */
+static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
+{
+ struct qib_mcast_qp *mqp;
+
+ mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
+ if (!mqp)
+ goto bail;
+
+ mqp->qp = qp;
+ atomic_inc(&qp->refcount);
+
+bail:
+ return mqp;
+}
+
+static void qib_mcast_qp_free(struct qib_mcast_qp *mqp)
+{
+ struct qib_qp *qp = mqp->qp;
+
+ /* Notify qib_destroy_qp() if it is waiting. */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+
+ kfree(mqp);
+}
+
+/**
+ * qib_mcast_alloc - allocate the multicast GID structure
+ * @mgid: the multicast GID
+ *
+ * A list of QPs will be attached to this structure.
+ */
+static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
+{
+ struct qib_mcast *mcast;
+
+ mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
+ if (!mcast)
+ goto bail;
+
+ mcast->mgid = *mgid;
+ INIT_LIST_HEAD(&mcast->qp_list);
+ init_waitqueue_head(&mcast->wait);
+ atomic_set(&mcast->refcount, 0);
+ mcast->n_attached = 0;
+
+bail:
+ return mcast;
+}
+
+static void qib_mcast_free(struct qib_mcast *mcast)
+{
+ struct qib_mcast_qp *p, *tmp;
+
+ list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
+ qib_mcast_qp_free(p);
+
+ kfree(mcast);
+}
+
+/**
+ * qib_mcast_find - search the global table for the given multicast GID
+ * @ibp: the IB port structure
+ * @mgid: the multicast GID to search for
+ *
+ * Returns NULL if not found.
+ *
+ * The caller is responsible for decrementing the reference count if found.
+ */
+struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
+{
+ struct rb_node *n;
+ unsigned long flags;
+ struct qib_mcast *mcast;
+
+ spin_lock_irqsave(&ibp->lock, flags);
+ n = ibp->mcast_tree.rb_node;
+ while (n) {
+ int ret;
+
+ mcast = rb_entry(n, struct qib_mcast, rb_node);
+
+ ret = memcmp(mgid->raw, mcast->mgid.raw,
+ sizeof(union ib_gid));
+ if (ret < 0)
+ n = n->rb_left;
+ else if (ret > 0)
+ n = n->rb_right;
+ else {
+ atomic_inc(&mcast->refcount);
+ spin_unlock_irqrestore(&ibp->lock, flags);
+ goto bail;
+ }
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+ mcast = NULL;
+
+bail:
+ return mcast;
+}
+
+/**
+ * qib_mcast_add - insert mcast GID into table and attach QP struct
+ * @mcast: the mcast GID table
+ * @mqp: the QP to attach
+ *
+ * Return zero if both were added. Return EEXIST if the GID was already in
+ * the table but the QP was added. Return ESRCH if the QP was already
+ * attached and neither structure was added.
+ */
+static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
+ struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
+{
+ struct rb_node **n = &ibp->mcast_tree.rb_node;
+ struct rb_node *pn = NULL;
+ int ret;
+
+ spin_lock_irq(&ibp->lock);
+
+ while (*n) {
+ struct qib_mcast *tmcast;
+ struct qib_mcast_qp *p;
+
+ pn = *n;
+ tmcast = rb_entry(pn, struct qib_mcast, rb_node);
+
+ ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
+ sizeof(union ib_gid));
+ if (ret < 0) {
+ n = &pn->rb_left;
+ continue;
+ }
+ if (ret > 0) {
+ n = &pn->rb_right;
+ continue;
+ }
+
+ /* Search the QP list to see if this is already there. */
+ list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
+ if (p->qp == mqp->qp) {
+ ret = ESRCH;
+ goto bail;
+ }
+ }
+ if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) {
+ ret = ENOMEM;
+ goto bail;
+ }
+
+ tmcast->n_attached++;
+
+ list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
+ ret = EEXIST;
+ goto bail;
+ }
+
+ spin_lock(&dev->n_mcast_grps_lock);
+ if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) {
+ spin_unlock(&dev->n_mcast_grps_lock);
+ ret = ENOMEM;
+ goto bail;
+ }
+
+ dev->n_mcast_grps_allocated++;
+ spin_unlock(&dev->n_mcast_grps_lock);
+
+ mcast->n_attached++;
+
+ list_add_tail_rcu(&mqp->list, &mcast->qp_list);
+
+ atomic_inc(&mcast->refcount);
+ rb_link_node(&mcast->rb_node, pn, n);
+ rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);
+
+ ret = 0;
+
+bail:
+ spin_unlock_irq(&ibp->lock);
+
+ return ret;
+}
+
+int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+ struct qib_ibdev *dev = to_idev(ibqp->device);
+ struct qib_ibport *ibp;
+ struct qib_mcast *mcast;
+ struct qib_mcast_qp *mqp;
+ int ret;
+
+ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /*
+ * Allocate the data structures since it's better to do this outside of
+ * spin locks, and they will most likely be needed.
+ */
+ mcast = qib_mcast_alloc(gid);
+ if (mcast == NULL) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ mqp = qib_mcast_qp_alloc(qp);
+ if (mqp == NULL) {
+ qib_mcast_free(mcast);
+ ret = -ENOMEM;
+ goto bail;
+ }
+ ibp = to_iport(ibqp->device, qp->port_num);
+ switch (qib_mcast_add(dev, ibp, mcast, mqp)) {
+ case ESRCH:
+ /* Neither was used: OK to attach the same QP twice. */
+ qib_mcast_qp_free(mqp);
+ qib_mcast_free(mcast);
+ break;
+
+ case EEXIST: /* The mcast wasn't used */
+ qib_mcast_free(mcast);
+ break;
+
+ case ENOMEM:
+ /* Exceeded the maximum number of mcast groups. */
+ qib_mcast_qp_free(mqp);
+ qib_mcast_free(mcast);
+ ret = -ENOMEM;
+ goto bail;
+
+ default:
+ break;
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+ struct qib_ibdev *dev = to_idev(ibqp->device);
+ struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
+ struct qib_mcast *mcast = NULL;
+ struct qib_mcast_qp *p, *tmp, *delp = NULL;
+ struct rb_node *n;
+ int last = 0;
+ int ret;
+
+ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ spin_lock_irq(&ibp->lock);
+
+ /* Find the GID in the mcast table. */
+ n = ibp->mcast_tree.rb_node;
+ while (1) {
+ if (n == NULL) {
+ spin_unlock_irq(&ibp->lock);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ mcast = rb_entry(n, struct qib_mcast, rb_node);
+ ret = memcmp(gid->raw, mcast->mgid.raw,
+ sizeof(union ib_gid));
+ if (ret < 0)
+ n = n->rb_left;
+ else if (ret > 0)
+ n = n->rb_right;
+ else
+ break;
+ }
+
+ /* Search the QP list. */
+ list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
+ if (p->qp != qp)
+ continue;
+ /*
+ * We found it, so remove it, but don't poison the forward
+ * link until we are sure there are no list walkers.
+ */
+ delp = p;
+ list_del_rcu(&p->list);
+ mcast->n_attached--;
+
+ /* If this was the last attached QP, remove the GID too. */
+ if (list_empty(&mcast->qp_list)) {
+ rb_erase(&mcast->rb_node, &ibp->mcast_tree);
+ last = 1;
+ }
+ break;
+ }
+
+ spin_unlock_irq(&ibp->lock);
+
+ if (delp) {
+ /*
+ * Wait for any list walkers to finish before freeing the
+ * list element.
+ */
+ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+ qib_mcast_qp_free(delp);
+ }
+ if (last) {
+ atomic_dec(&mcast->refcount);
+ wait_event(mcast->wait, !atomic_read(&mcast->refcount));
+ qib_mcast_free(mcast);
+ spin_lock_irq(&dev->n_mcast_grps_lock);
+ dev->n_mcast_grps_allocated--;
+ spin_unlock_irq(&dev->n_mcast_grps_lock);
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+int qib_mcast_tree_empty(struct qib_ibport *ibp)
+{
+ return ibp->mcast_tree.rb_node == NULL;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_7220.h b/drivers/infiniband/hw/qib/qib_wc_ppc64.c
index 74fa5cc5131d..673cf4c22ebd 100644
--- a/drivers/infiniband/hw/ipath/ipath_7220.h
+++ b/drivers/infiniband/hw/qib/qib_wc_ppc64.c
@@ -1,7 +1,5 @@
-#ifndef _IPATH_7220_H
-#define _IPATH_7220_H
/*
- * Copyright (c) 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -33,25 +31,32 @@
*/
/*
- * This header file provides the declarations and common definitions
- * for (mostly) manipulation of the SerDes blocks within the IBA7220.
- * the functions declared should only be called from within other
- * 7220-related files such as ipath_iba7220.c or ipath_sd7220.c.
+ * This file is conditionally built on PowerPC only. Otherwise weak symbol
+ * versions of the functions exported from here are used.
*/
-int ipath_sd7220_presets(struct ipath_devdata *dd);
-int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset);
-int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, u8 *img,
- int len, int offset);
-int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, const u8 *img,
- int len, int offset);
-/*
- * Below used for sdnum parameter, selecting one of the two sections
- * used for PCIe, or the single SerDes used for IB, which is the
- * only one currently used
- */
-#define IB_7220_SERDES 2
-int ipath_sd7220_ib_load(struct ipath_devdata *dd);
-int ipath_sd7220_ib_vfy(struct ipath_devdata *dd);
+#include "qib.h"
-#endif /* _IPATH_7220_H */
+/**
+ * qib_enable_wc - enable write combining for MMIO writes to the device
+ * @dd: qlogic_ib device
+ *
+ * Nothing to do on PowerPC, so just return without error.
+ */
+int qib_enable_wc(struct qib_devdata *dd)
+{
+ return 0;
+}
+
+/**
+ * qib_unordered_wc - indicate whether write combining is unordered
+ *
+ * Because our performance depends on our ability to do write
+ * combining mmio writes in the most efficient way, we need to
+ * know if we are on a processor that may reorder stores when
+ * write combining.
+ */
+int qib_unordered_wc(void)
+{
+ return 1;
+}
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
new file mode 100644
index 000000000000..561b8bca4060
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file is conditionally built on x86_64 only. Otherwise weak symbol
+ * versions of the functions exported from here are used.
+ */
+
+#include <linux/pci.h>
+#include <asm/mtrr.h>
+#include <asm/processor.h>
+
+#include "qib.h"
+
+/**
+ * qib_enable_wc - enable write combining for MMIO writes to the device
+ * @dd: qlogic_ib device
+ *
+ * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable
+ * write combining.
+ */
+int qib_enable_wc(struct qib_devdata *dd)
+{
+ int ret = 0;
+ u64 pioaddr, piolen;
+ unsigned bits;
+ const unsigned long addr = pci_resource_start(dd->pcidev, 0);
+ const size_t len = pci_resource_len(dd->pcidev, 0);
+
+ /*
+ * Set the PIO buffers to be WCCOMB, so we get HT bursts to the
+ * chip. Linux (possibly the hardware) requires it to be on a power
+ * of 2 address matching the length (which has to be a power of 2).
+ * For rev1, that means the base address, for rev2, it will be just
+ * the PIO buffers themselves.
+ * For chips with two sets of buffers, the calculations are
+ * somewhat more complicated; we need to sum, and the piobufbase
+ * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
+ * The buffers are still packed, so a single range covers both.
+ */
+ if (dd->piobcnt2k && dd->piobcnt4k) {
+ /* 2 sizes for chip */
+ unsigned long pio2kbase, pio4kbase;
+ pio2kbase = dd->piobufbase & 0xffffffffUL;
+ pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
+ if (pio2kbase < pio4kbase) {
+ /* all current chips */
+ pioaddr = addr + pio2kbase;
+ piolen = pio4kbase - pio2kbase +
+ dd->piobcnt4k * dd->align4k;
+ } else {
+ pioaddr = addr + pio4kbase;
+ piolen = pio2kbase - pio4kbase +
+ dd->piobcnt2k * dd->palign;
+ }
+ } else { /* single buffer size (2K, currently) */
+ pioaddr = addr + dd->piobufbase;
+ piolen = dd->piobcnt2k * dd->palign +
+ dd->piobcnt4k * dd->align4k;
+ }
+
+ for (bits = 0; !(piolen & (1ULL << bits)); bits++)
+ /* do nothing */ ;
+
+ if (piolen != (1ULL << bits)) {
+ piolen >>= bits;
+ while (piolen >>= 1)
+ bits++;
+ piolen = 1ULL << (bits + 1);
+ }
+ if (pioaddr & (piolen - 1)) {
+ u64 atmp;
+ atmp = pioaddr & ~(piolen - 1);
+ if (atmp < addr || (atmp + piolen) > (addr + len)) {
+ qib_dev_err(dd, "No way to align address/size "
+ "(%llx/%llx), no WC mtrr\n",
+ (unsigned long long) atmp,
+ (unsigned long long) piolen << 1);
+ ret = -ENODEV;
+ } else {
+ pioaddr = atmp;
+ piolen <<= 1;
+ }
+ }
+
+ if (!ret) {
+ int cookie;
+
+ cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
+ if (cookie < 0) {
+ qib_devinfo(dd->pcidev,
+ "mtrr_add() WC for PIO bufs "
+ "failed (%d)\n",
+ cookie);
+ ret = -EINVAL;
+ } else {
+ dd->wc_cookie = cookie;
+ dd->wc_base = (unsigned long) pioaddr;
+ dd->wc_len = (unsigned long) piolen;
+ }
+ }
+
+ return ret;
+}
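
A simplified standalone sketch of the rounding performed above, assuming the MTRR range must be a power-of-two length aligned to its size; the helper name is hypothetical and a real caller would still have to check that the resulting range stays within the PIO BAR, as the code above does:

    #include <stdint.h>

    static void example_wc_range(uint64_t base, uint64_t len,
                                 uint64_t *wc_base, uint64_t *wc_len)
    {
        uint64_t size = 1;

        while (size < len)                  /* round length up to a power of two */
            size <<= 1;
        *wc_base = base & ~(size - 1);      /* align the base down to that size */
        *wc_len = size;
    }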
+
+/**
+ * qib_disable_wc - disable write combining for MMIO writes to the device
+ * @dd: qlogic_ib device
+ */
+void qib_disable_wc(struct qib_devdata *dd)
+{
+ if (dd->wc_cookie) {
+ int r;
+
+ r = mtrr_del(dd->wc_cookie, dd->wc_base,
+ dd->wc_len);
+ if (r < 0)
+ qib_devinfo(dd->pcidev,
+ "mtrr_del(%lx, %lx, %lx) failed: %d\n",
+ dd->wc_cookie, dd->wc_base,
+ dd->wc_len, r);
+ dd->wc_cookie = 0; /* even on failure */
+ }
+}
+
+/**
+ * qib_unordered_wc - indicate whether write combining is unordered
+ *
+ * Because our performance depends on our ability to do write combining mmio
+ * writes in the most efficient way, we need to know if we are on an Intel
+ * or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in
+ * the order completed, and so no special flushing is required to get
+ * correct ordering. Intel processors, however, will flush write buffers
+ * out in "random" orders, and so explicit ordering is needed at times.
+ */
+int qib_unordered_wc(void)
+{
+ return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
+}
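
As a hypothetical usage sketch (this helper is not part of the patch): a caller that has just issued a burst of write-combined PIO stores could consult qib_unordered_wc() to decide whether an explicit write barrier is needed before notifying the chip:

    static inline void example_flush_wc(void)
    {
        if (qib_unordered_wc())
            wmb();  /* flush WC buffers that may otherwise leave out of order */
    }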
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 423e0e6031ab..34157bb97ed6 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -47,15 +47,15 @@ struct joydev {
struct mutex mutex;
struct device dev;
- struct js_corr corr[ABS_MAX + 1];
+ struct js_corr corr[ABS_CNT];
struct JS_DATA_SAVE_TYPE glue;
int nabs;
int nkey;
__u16 keymap[KEY_MAX - BTN_MISC + 1];
__u16 keypam[KEY_MAX - BTN_MISC + 1];
- __u8 absmap[ABS_MAX + 1];
- __u8 abspam[ABS_MAX + 1];
- __s16 abs[ABS_MAX + 1];
+ __u8 absmap[ABS_CNT];
+ __u8 abspam[ABS_CNT];
+ __s16 abs[ABS_CNT];
};
struct joydev_client {
@@ -826,7 +826,7 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
joydev->handle.handler = handler;
joydev->handle.private = joydev;
- for (i = 0; i < ABS_MAX + 1; i++)
+ for (i = 0; i < ABS_CNT; i++)
if (test_bit(i, dev->absbit)) {
joydev->absmap[i] = joydev->nabs;
joydev->abspam[joydev->nabs] = i;
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 4771ab172b59..744600eff222 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -287,7 +287,6 @@ static int __devexit adp5588_remove(struct i2c_client *client)
free_irq(client->irq, kpad);
cancel_delayed_work_sync(&kpad->work);
input_unregister_device(kpad->input);
- i2c_set_clientdata(client, NULL);
kfree(kpad);
return 0;
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index 35149ec455a9..79172af164f2 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -35,6 +35,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/keyboard.h>
+#include <linux/platform_device.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
@@ -154,10 +155,9 @@ static const char *amikbd_messages[8] = {
[7] = KERN_WARNING "amikbd: keyboard interrupt\n"
};
-static struct input_dev *amikbd_dev;
-
-static irqreturn_t amikbd_interrupt(int irq, void *dummy)
+static irqreturn_t amikbd_interrupt(int irq, void *data)
{
+ struct input_dev *dev = data;
unsigned char scancode, down;
scancode = ~ciaa.sdr; /* get and invert scancode (keyboard is active low) */
@@ -170,47 +170,42 @@ static irqreturn_t amikbd_interrupt(int irq, void *dummy)
if (scancode < 0x78) { /* scancodes < 0x78 are keys */
if (scancode == 98) { /* CapsLock is a toggle switch key on Amiga */
- input_report_key(amikbd_dev, scancode, 1);
- input_report_key(amikbd_dev, scancode, 0);
+ input_report_key(dev, scancode, 1);
+ input_report_key(dev, scancode, 0);
} else {
- input_report_key(amikbd_dev, scancode, down);
+ input_report_key(dev, scancode, down);
}
- input_sync(amikbd_dev);
+ input_sync(dev);
} else /* scancodes >= 0x78 are error codes */
printk(amikbd_messages[scancode - 0x78]);
return IRQ_HANDLED;
}
-static int __init amikbd_init(void)
+static int __init amikbd_probe(struct platform_device *pdev)
{
+ struct input_dev *dev;
int i, j, err;
- if (!AMIGAHW_PRESENT(AMI_KEYBOARD))
- return -ENODEV;
-
- if (!request_mem_region(CIAA_PHYSADDR-1+0xb00, 0x100, "amikeyb"))
- return -EBUSY;
-
- amikbd_dev = input_allocate_device();
- if (!amikbd_dev) {
- printk(KERN_ERR "amikbd: not enough memory for input device\n");
- err = -ENOMEM;
- goto fail1;
+ dev = input_allocate_device();
+ if (!dev) {
+ dev_err(&pdev->dev, "Not enough memory for input device\n");
+ return -ENOMEM;
}
- amikbd_dev->name = "Amiga Keyboard";
- amikbd_dev->phys = "amikbd/input0";
- amikbd_dev->id.bustype = BUS_AMIGA;
- amikbd_dev->id.vendor = 0x0001;
- amikbd_dev->id.product = 0x0001;
- amikbd_dev->id.version = 0x0100;
+ dev->name = pdev->name;
+ dev->phys = "amikbd/input0";
+ dev->id.bustype = BUS_AMIGA;
+ dev->id.vendor = 0x0001;
+ dev->id.product = 0x0001;
+ dev->id.version = 0x0100;
+ dev->dev.parent = &pdev->dev;
- amikbd_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
for (i = 0; i < 0x78; i++)
- set_bit(i, amikbd_dev->keybit);
+ set_bit(i, dev->keybit);
for (i = 0; i < MAX_NR_KEYMAPS; i++) {
static u_short temp_map[NR_KEYS] __initdata;
@@ -229,30 +224,54 @@ static int __init amikbd_init(void)
memcpy(key_maps[i], temp_map, sizeof(temp_map));
}
ciaa.cra &= ~0x41; /* serial data in, turn off TA */
- if (request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd",
- amikbd_interrupt)) {
- err = -EBUSY;
+ err = request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd",
+ dev);
+ if (err)
goto fail2;
- }
- err = input_register_device(amikbd_dev);
+ err = input_register_device(dev);
if (err)
goto fail3;
+ platform_set_drvdata(pdev, dev);
+
return 0;
- fail3: free_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt);
- fail2: input_free_device(amikbd_dev);
- fail1: release_mem_region(CIAA_PHYSADDR - 1 + 0xb00, 0x100);
+ fail3: free_irq(IRQ_AMIGA_CIAA_SP, dev);
+ fail2: input_free_device(dev);
return err;
}
-static void __exit amikbd_exit(void)
+static int __exit amikbd_remove(struct platform_device *pdev)
+{
+ struct input_dev *dev = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ free_irq(IRQ_AMIGA_CIAA_SP, dev);
+ input_unregister_device(dev);
+ return 0;
+}
+
+static struct platform_driver amikbd_driver = {
+ .remove = __exit_p(amikbd_remove),
+ .driver = {
+ .name = "amiga-keyboard",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init amikbd_init(void)
{
- free_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt);
- input_unregister_device(amikbd_dev);
- release_mem_region(CIAA_PHYSADDR - 1 + 0xb00, 0x100);
+ return platform_driver_probe(&amikbd_driver, amikbd_probe);
}
module_init(amikbd_init);
+
+static void __exit amikbd_exit(void)
+{
+ platform_driver_unregister(&amikbd_driver);
+}
+
module_exit(amikbd_exit);
+
+MODULE_ALIAS("platform:amiga-keyboard");
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index bc696931fed7..40b032f0e32c 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -778,8 +778,6 @@ static int __devexit lm8323_remove(struct i2c_client *client)
struct lm8323_chip *lm = i2c_get_clientdata(client);
int i;
- i2c_set_clientdata(client, NULL);
-
disable_irq_wake(client->irq);
free_irq(client->irq, lm);
cancel_work_sync(&lm->work);
diff --git a/drivers/input/keyboard/max7359_keypad.c b/drivers/input/keyboard/max7359_keypad.c
index 7fc8185e5c1b..9091ff5ea808 100644
--- a/drivers/input/keyboard/max7359_keypad.c
+++ b/drivers/input/keyboard/max7359_keypad.c
@@ -265,7 +265,6 @@ static int __devexit max7359_remove(struct i2c_client *client)
free_irq(client->irq, keypad);
input_unregister_device(keypad->input_dev);
- i2c_set_clientdata(client, NULL);
kfree(keypad);
return 0;
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 31f30087b591..fac695157e8a 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -358,7 +358,6 @@ static int __devexit qt2160_remove(struct i2c_client *client)
input_unregister_device(qt2160->input);
kfree(qt2160);
- i2c_set_clientdata(client, NULL);
return 0;
}
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index 493c93f25e2a..00137bebcf97 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -316,8 +316,6 @@ static int __devexit tca6416_keypad_remove(struct i2c_client *client)
input_unregister_device(chip->input);
kfree(chip);
- i2c_set_clientdata(client, NULL);
-
return 0;
}
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 48cdabec372a..c44b9eafc556 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -80,6 +80,16 @@ config INPUT_M68K_BEEP
tristate "M68k Beeper support"
depends on M68K
+config INPUT_MAX8925_ONKEY
+ tristate "MAX8925 ONKEY support"
+ depends on MFD_MAX8925
+ help
+ Support the ONKEY of MAX8925 PMICs as an input device
+ reporting power button status.
+
+ To compile this driver as a module, choose M here: the module
+ will be called max8925_onkey.
+
config INPUT_APANEL
tristate "Fujitsu Lifebook Application Panel buttons"
depends on X86 && I2C && LEDS_CLASS
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index f9f577031e06..71fe57d8023f 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
+obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
index e9adbe49f6a4..2bef8fa56c94 100644
--- a/drivers/input/misc/ad714x-i2c.c
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -97,7 +97,6 @@ static int __devexit ad714x_i2c_remove(struct i2c_client *client)
struct ad714x_chip *chip = i2c_get_clientdata(client);
ad714x_remove(chip);
- i2c_set_clientdata(client, NULL);
return 0;
}
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index ad730e15afc0..c19066479057 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -43,6 +43,7 @@
#include <linux/proc_fs.h>
#include <linux/poll.h>
#include <linux/rtc.h>
+#include <linux/smp_lock.h>
#include <linux/semaphore.h>
MODULE_AUTHOR("Brian S. Julin <bri@calyx.com>");
@@ -64,8 +65,8 @@ static DECLARE_WAIT_QUEUE_HEAD(hp_sdc_rtc_wait);
static ssize_t hp_sdc_rtc_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
-static int hp_sdc_rtc_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
+static long hp_sdc_rtc_unlocked_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
static unsigned int hp_sdc_rtc_poll(struct file *file, poll_table *wait);
@@ -512,7 +513,7 @@ static int hp_sdc_rtc_read_proc(char *page, char **start, off_t off,
return len;
}
-static int hp_sdc_rtc_ioctl(struct inode *inode, struct file *file,
+static int hp_sdc_rtc_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
#if 1
@@ -659,14 +660,27 @@ static int hp_sdc_rtc_ioctl(struct inode *inode, struct file *file,
#endif
}
+static long hp_sdc_rtc_unlocked_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = hp_sdc_rtc_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
+
static const struct file_operations hp_sdc_rtc_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = hp_sdc_rtc_read,
- .poll = hp_sdc_rtc_poll,
- .ioctl = hp_sdc_rtc_ioctl,
- .open = hp_sdc_rtc_open,
- .fasync = hp_sdc_rtc_fasync,
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = hp_sdc_rtc_read,
+ .poll = hp_sdc_rtc_poll,
+ .unlocked_ioctl = hp_sdc_rtc_unlocked_ioctl,
+ .open = hp_sdc_rtc_open,
+ .fasync = hp_sdc_rtc_fasync,
};
static struct miscdevice hp_sdc_rtc_dev = {
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
new file mode 100644
index 000000000000..80af44608018
--- /dev/null
+++ b/drivers/input/misc/max8925_onkey.c
@@ -0,0 +1,148 @@
+/**
+ * max8925_onkey.c - MAX8925 ONKEY driver
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max8925.h>
+#include <linux/slab.h>
+
+#define HARDRESET_EN (1 << 7)
+#define PWREN_EN (1 << 7)
+
+struct max8925_onkey_info {
+ struct input_dev *idev;
+ struct i2c_client *i2c;
+ int irq;
+};
+
+/*
+ * MAX8925 gives us an interrupt when ONKEY is held for 3 seconds.
+ * max8925_set_bits() operates on the I2C bus and may sleep, so the
+ * handling is done in a threaded IRQ handler.
+ */
+static irqreturn_t max8925_onkey_handler(int irq, void *data)
+{
+ struct max8925_onkey_info *info = data;
+
+ input_report_key(info->idev, KEY_POWER, 1);
+ input_sync(info->idev);
+
+ /* Enable hard reset to halt the system if it isn't shut down in time */
+ max8925_set_bits(info->i2c, MAX8925_SYSENSEL,
+ HARDRESET_EN, HARDRESET_EN);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit max8925_onkey_probe(struct platform_device *pdev)
+{
+ struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct max8925_onkey_info *info;
+ int error;
+
+ info = kzalloc(sizeof(struct max8925_onkey_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->i2c = chip->i2c;
+ info->irq = chip->irq_base + MAX8925_IRQ_GPM_SW_3SEC;
+
+ info->idev = input_allocate_device();
+ if (!info->idev) {
+ dev_err(chip->dev, "Failed to allocate input dev\n");
+ error = -ENOMEM;
+ goto out_input;
+ }
+
+ info->idev->name = "max8925_on";
+ info->idev->phys = "max8925_on/input0";
+ info->idev->id.bustype = BUS_I2C;
+ info->idev->dev.parent = &pdev->dev;
+ info->idev->evbit[0] = BIT_MASK(EV_KEY);
+ info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);
+
+ error = request_threaded_irq(info->irq, NULL, max8925_onkey_handler,
+ IRQF_ONESHOT, "onkey", info);
+ if (error < 0) {
+ dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
+ info->irq, error);
+ goto out_irq;
+ }
+
+ error = input_register_device(info->idev);
+ if (error) {
+ dev_err(chip->dev, "Can't register input device: %d\n", error);
+ goto out;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ return 0;
+
+out:
+ free_irq(info->irq, info);
+out_irq:
+ input_free_device(info->idev);
+out_input:
+ kfree(info);
+ return error;
+}
+
+static int __devexit max8925_onkey_remove(struct platform_device *pdev)
+{
+ struct max8925_onkey_info *info = platform_get_drvdata(pdev);
+
+ free_irq(info->irq, info);
+ input_unregister_device(info->idev);
+ kfree(info);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver max8925_onkey_driver = {
+ .driver = {
+ .name = "max8925-onkey",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8925_onkey_probe,
+ .remove = __devexit_p(max8925_onkey_remove),
+};
+
+static int __init max8925_onkey_init(void)
+{
+ return platform_driver_register(&max8925_onkey_driver);
+}
+module_init(max8925_onkey_init);
+
+static void __exit max8925_onkey_exit(void)
+{
+ platform_driver_unregister(&max8925_onkey_driver);
+}
+module_exit(max8925_onkey_exit);
+
+MODULE_DESCRIPTION("Maxim MAX8925 ONKEY driver");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index 5c3ac4e0b055..0ac47d2898ec 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -168,8 +168,6 @@ static int __devexit pcf8574_kp_remove(struct i2c_client *client)
input_unregister_device(lp->idev);
kfree(lp);
- i2c_set_clientdata(client, NULL);
-
return 0;
}
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index 0d45422f8095..1dacae4b43f0 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -259,8 +259,11 @@ static const struct of_device_id bbc_beep_match[] = {
};
static struct of_platform_driver bbc_beep_driver = {
- .name = "bbcbeep",
- .match_table = bbc_beep_match,
+ .driver = {
+ .name = "bbcbeep",
+ .owner = THIS_MODULE,
+ .of_match_table = bbc_beep_match,
+ },
.probe = bbc_beep_probe,
.remove = __devexit_p(bbc_remove),
.shutdown = sparcspkr_shutdown,
@@ -338,8 +341,11 @@ static const struct of_device_id grover_beep_match[] = {
};
static struct of_platform_driver grover_beep_driver = {
- .name = "groverbeep",
- .match_table = grover_beep_match,
+ .driver = {
+ .name = "groverbeep",
+ .owner = THIS_MODULE,
+ .of_match_table = grover_beep_match,
+ },
.probe = grover_beep_probe,
.remove = __devexit_p(grover_remove),
.shutdown = sparcspkr_shutdown,
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index fee9eac8e04a..4f9b2afc24e8 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -90,8 +90,8 @@ static void vibra_disable(struct vibra_info *info)
twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
(reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
- twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
twl4030_codec_disable_resource(TWL4030_CODEC_RES_APLL);
+ twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
info->enabled = false;
}
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 1477466076ad..b71eb55f2dbc 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -300,7 +300,7 @@ static int uinput_validate_absbits(struct input_dev *dev)
unsigned int cnt;
int retval = 0;
- for (cnt = 0; cnt < ABS_MAX + 1; cnt++) {
+ for (cnt = 0; cnt < ABS_CNT; cnt++) {
if (!test_bit(cnt, dev->absbit))
continue;
@@ -387,7 +387,7 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
dev->id.product = user_dev->id.product;
dev->id.version = user_dev->id.version;
- size = sizeof(int) * (ABS_MAX + 1);
+ size = sizeof(int) * ABS_CNT;
memcpy(dev->absmax, user_dev->absmax, size);
memcpy(dev->absmin, user_dev->absmin, size);
memcpy(dev->absfuzz, user_dev->absfuzz, size);
diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c
index a185ac78a42c..ff5f61a0fd3a 100644
--- a/drivers/input/mouse/amimouse.c
+++ b/drivers/input/mouse/amimouse.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <asm/irq.h>
#include <asm/setup.h>
@@ -34,10 +35,10 @@ MODULE_DESCRIPTION("Amiga mouse driver");
MODULE_LICENSE("GPL");
static int amimouse_lastx, amimouse_lasty;
-static struct input_dev *amimouse_dev;
-static irqreturn_t amimouse_interrupt(int irq, void *dummy)
+static irqreturn_t amimouse_interrupt(int irq, void *data)
{
+ struct input_dev *dev = data;
unsigned short joy0dat, potgor;
int nx, ny, dx, dy;
@@ -59,14 +60,14 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy)
potgor = amiga_custom.potgor;
- input_report_rel(amimouse_dev, REL_X, dx);
- input_report_rel(amimouse_dev, REL_Y, dy);
+ input_report_rel(dev, REL_X, dx);
+ input_report_rel(dev, REL_Y, dy);
- input_report_key(amimouse_dev, BTN_LEFT, ciaa.pra & 0x40);
- input_report_key(amimouse_dev, BTN_MIDDLE, potgor & 0x0100);
- input_report_key(amimouse_dev, BTN_RIGHT, potgor & 0x0400);
+ input_report_key(dev, BTN_LEFT, ciaa.pra & 0x40);
+ input_report_key(dev, BTN_MIDDLE, potgor & 0x0100);
+ input_report_key(dev, BTN_RIGHT, potgor & 0x0400);
- input_sync(amimouse_dev);
+ input_sync(dev);
return IRQ_HANDLED;
}
@@ -74,63 +75,90 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy)
static int amimouse_open(struct input_dev *dev)
{
unsigned short joy0dat;
+ int error;
joy0dat = amiga_custom.joy0dat;
amimouse_lastx = joy0dat & 0xff;
amimouse_lasty = joy0dat >> 8;
- if (request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse", amimouse_interrupt)) {
- printk(KERN_ERR "amimouse.c: Can't allocate irq %d\n", IRQ_AMIGA_VERTB);
- return -EBUSY;
- }
+ error = request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse",
+ dev);
+ if (error)
+ dev_err(&dev->dev, "Can't allocate irq %d\n", IRQ_AMIGA_VERTB);
- return 0;
+ return error;
}
static void amimouse_close(struct input_dev *dev)
{
- free_irq(IRQ_AMIGA_VERTB, amimouse_interrupt);
+ free_irq(IRQ_AMIGA_VERTB, dev);
}
-static int __init amimouse_init(void)
+static int __init amimouse_probe(struct platform_device *pdev)
{
int err;
+ struct input_dev *dev;
- if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_MOUSE))
- return -ENODEV;
-
- amimouse_dev = input_allocate_device();
- if (!amimouse_dev)
+ dev = input_allocate_device();
+ if (!dev)
return -ENOMEM;
- amimouse_dev->name = "Amiga mouse";
- amimouse_dev->phys = "amimouse/input0";
- amimouse_dev->id.bustype = BUS_AMIGA;
- amimouse_dev->id.vendor = 0x0001;
- amimouse_dev->id.product = 0x0002;
- amimouse_dev->id.version = 0x0100;
+ dev->name = pdev->name;
+ dev->phys = "amimouse/input0";
+ dev->id.bustype = BUS_AMIGA;
+ dev->id.vendor = 0x0001;
+ dev->id.product = 0x0002;
+ dev->id.version = 0x0100;
- amimouse_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
- amimouse_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
- amimouse_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
+ dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
+ dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
+ dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
- amimouse_dev->open = amimouse_open;
- amimouse_dev->close = amimouse_close;
+ dev->open = amimouse_open;
+ dev->close = amimouse_close;
+ dev->dev.parent = &pdev->dev;
- err = input_register_device(amimouse_dev);
+ err = input_register_device(dev);
if (err) {
- input_free_device(amimouse_dev);
+ input_free_device(dev);
return err;
}
+ platform_set_drvdata(pdev, dev);
+
return 0;
}
-static void __exit amimouse_exit(void)
+static int __exit amimouse_remove(struct platform_device *pdev)
{
- input_unregister_device(amimouse_dev);
+ struct input_dev *dev = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ input_unregister_device(dev);
+ return 0;
+}
+
+static struct platform_driver amimouse_driver = {
+ .remove = __exit_p(amimouse_remove),
+ .driver = {
+ .name = "amiga-mouse",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init amimouse_init(void)
+{
+ return platform_driver_probe(&amimouse_driver, amimouse_probe);
}
module_init(amimouse_init);
+
+static void __exit amimouse_exit(void)
+{
+ platform_driver_unregister(&amimouse_driver);
+}
+
module_exit(amimouse_exit);
+
+MODULE_ALIAS("platform:amiga-mouse");
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index 8291e7399ffa..0ae62f0bcb32 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -613,7 +613,6 @@ static int __devexit synaptics_i2c_remove(struct i2c_client *client)
free_irq(client->irq, touch);
input_unregister_device(touch->input);
- i2c_set_clientdata(client, NULL);
kfree(touch);
return 0;
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index f34f1dbeb577..3bfe8fafc6ad 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -21,7 +21,8 @@ if SERIO
config SERIO_I8042
tristate "i8042 PC Keyboard controller" if EMBEDDED || !X86
default y
- depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && !M68K && !BLACKFIN
+ depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
+ (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN
help
i8042 is the chip over which the standard AT keyboard and PS/2
mouse are connected to the computer. If you use these devices,
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 5071af2c0604..04e32f2d1241 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -51,7 +51,7 @@ static inline void i8042_write_command(int val)
static int __devinit sparc_i8042_probe(struct of_device *op, const struct of_device_id *match)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
dp = dp->child;
while (dp) {
@@ -96,8 +96,11 @@ static const struct of_device_id sparc_i8042_match[] = {
MODULE_DEVICE_TABLE(of, sparc_i8042_match);
static struct of_platform_driver sparc_i8042_driver = {
- .name = "i8042",
- .match_table = sparc_i8042_match,
+ .driver = {
+ .name = "i8042",
+ .owner = THIS_MODULE,
+ .of_match_table = sparc_i8042_match,
+ },
.probe = sparc_i8042_probe,
.remove = __devexit_p(sparc_i8042_remove),
};
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index f84f8e32e3f1..e2c028d2638f 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -244,17 +244,17 @@ static int __devinit xps2_of_probe(struct of_device *ofdev,
int error;
dev_info(dev, "Device Tree Probing \'%s\'\n",
- ofdev->node->name);
+ ofdev->dev.of_node->name);
/* Get iospace for the device */
- error = of_address_to_resource(ofdev->node, 0, &r_mem);
+ error = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
if (error) {
dev_err(dev, "invalid address\n");
return error;
}
/* Get IRQ for the device */
- if (of_irq_to_resource(ofdev->node, 0, &r_irq) == NO_IRQ) {
+ if (of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq) == NO_IRQ) {
dev_err(dev, "no IRQ found\n");
return -ENODEV;
}
@@ -342,7 +342,7 @@ static int __devexit xps2_of_remove(struct of_device *of_dev)
iounmap(drvdata->base_address);
/* Get iospace of the device */
- if (of_address_to_resource(of_dev->node, 0, &r_mem))
+ if (of_address_to_resource(of_dev->dev.of_node, 0, &r_mem))
dev_err(dev, "invalid address\n");
else
release_mem_region(r_mem.start, resource_size(&r_mem));
@@ -362,8 +362,11 @@ static const struct of_device_id xps2_of_match[] __devinitconst = {
MODULE_DEVICE_TABLE(of, xps2_of_match);
static struct of_platform_driver xps2_of_driver = {
- .name = DRIVER_NAME,
- .match_table = xps2_of_match,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = xps2_of_match,
+ },
.probe = xps2_of_probe,
.remove = __devexit_p(xps2_of_remove),
};
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 2dc0c07c0469..42ba3691d908 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -508,7 +508,6 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
}
input_dev->name = wacom_wac->name;
- input_dev->name = wacom_wac->name;
input_dev->dev.parent = &intf->dev;
input_dev->open = wacom_open;
input_dev->close = wacom_close;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 847fd0135bcf..d564af58175c 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -300,7 +300,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
case 0x823: /* Intuos3 Grip Pen */
case 0x813: /* Intuos3 Classic Pen */
case 0x885: /* Intuos3 Marker Pen */
- case 0x802: /* Intuos4 Grip Pen Eraser */
+ case 0x802: /* Intuos4 General Pen */
case 0x804: /* Intuos4 Marker Pen */
case 0x40802: /* Intuos4 Classic Pen */
case 0x022:
@@ -335,7 +335,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
case 0x81b: /* Intuos3 Classic Pen Eraser */
case 0x91b: /* Intuos3 Airbrush Eraser */
case 0x80c: /* Intuos4 Marker Pen Eraser */
- case 0x80a: /* Intuos4 Grip Pen Eraser */
+ case 0x80a: /* Intuos4 General Pen Eraser */
case 0x4080a: /* Intuos4 Classic Pen Eraser */
case 0x90a: /* Intuos4 Airbrush Eraser */
wacom->tool[idx] = BTN_TOOL_RUBBER;
@@ -356,6 +356,11 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
return 1;
}
+ /* older I4 styli don't work with new Cintiqs */
+ if (!((wacom->id[idx] >> 20) & 0x01) &&
+ (features->type == WACOM_21UX2))
+ return 1;
+
/* Exit report */
if ((data[1] & 0xfe) == 0x80) {
/*
@@ -474,21 +479,43 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
input_report_abs(input, ABS_MISC, 0);
}
} else {
- input_report_key(input, BTN_0, (data[5] & 0x01));
- input_report_key(input, BTN_1, (data[5] & 0x02));
- input_report_key(input, BTN_2, (data[5] & 0x04));
- input_report_key(input, BTN_3, (data[5] & 0x08));
- input_report_key(input, BTN_4, (data[6] & 0x01));
- input_report_key(input, BTN_5, (data[6] & 0x02));
- input_report_key(input, BTN_6, (data[6] & 0x04));
- input_report_key(input, BTN_7, (data[6] & 0x08));
- input_report_key(input, BTN_8, (data[5] & 0x10));
- input_report_key(input, BTN_9, (data[6] & 0x10));
+ if (features->type == WACOM_21UX2) {
+ input_report_key(input, BTN_0, (data[5] & 0x01));
+ input_report_key(input, BTN_1, (data[6] & 0x01));
+ input_report_key(input, BTN_2, (data[6] & 0x02));
+ input_report_key(input, BTN_3, (data[6] & 0x04));
+ input_report_key(input, BTN_4, (data[6] & 0x08));
+ input_report_key(input, BTN_5, (data[6] & 0x10));
+ input_report_key(input, BTN_6, (data[6] & 0x20));
+ input_report_key(input, BTN_7, (data[6] & 0x40));
+ input_report_key(input, BTN_8, (data[6] & 0x80));
+ input_report_key(input, BTN_9, (data[7] & 0x01));
+ input_report_key(input, BTN_A, (data[8] & 0x01));
+ input_report_key(input, BTN_B, (data[8] & 0x02));
+ input_report_key(input, BTN_C, (data[8] & 0x04));
+ input_report_key(input, BTN_X, (data[8] & 0x08));
+ input_report_key(input, BTN_Y, (data[8] & 0x10));
+ input_report_key(input, BTN_Z, (data[8] & 0x20));
+ input_report_key(input, BTN_BASE, (data[8] & 0x40));
+ input_report_key(input, BTN_BASE2, (data[8] & 0x80));
+ } else {
+ input_report_key(input, BTN_0, (data[5] & 0x01));
+ input_report_key(input, BTN_1, (data[5] & 0x02));
+ input_report_key(input, BTN_2, (data[5] & 0x04));
+ input_report_key(input, BTN_3, (data[5] & 0x08));
+ input_report_key(input, BTN_4, (data[6] & 0x01));
+ input_report_key(input, BTN_5, (data[6] & 0x02));
+ input_report_key(input, BTN_6, (data[6] & 0x04));
+ input_report_key(input, BTN_7, (data[6] & 0x08));
+ input_report_key(input, BTN_8, (data[5] & 0x10));
+ input_report_key(input, BTN_9, (data[6] & 0x10));
+ }
input_report_abs(input, ABS_RX, ((data[1] & 0x1f) << 8) | data[2]);
input_report_abs(input, ABS_RY, ((data[3] & 0x1f) << 8) | data[4]);
if ((data[5] & 0x1f) | (data[6] & 0x1f) | (data[1] & 0x1f) |
- data[2] | (data[3] & 0x1f) | data[4]) {
+ data[2] | (data[3] & 0x1f) | data[4] | data[8] |
+ (data[7] & 0x01)) {
input_report_key(input, wacom->tool[1], 1);
input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
} else {
@@ -640,7 +667,7 @@ static void wacom_tpc_finger_in(struct wacom_wac *wacom, char *data, int idx)
if (!idx)
input_report_key(input, BTN_TOUCH, 1);
input_event(input, EV_MSC, MSC_SERIAL, finger);
- input_sync(wacom->input);
+ input_sync(input);
wacom->last_finger = finger;
}
@@ -826,6 +853,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case INTUOS4L:
case CINTIQ:
case WACOM_BEE:
+ case WACOM_21UX2:
sync = wacom_intuos_irq(wacom_wac);
break;
@@ -921,6 +949,17 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_STYLUS2, input_dev->keybit);
break;
+ case WACOM_21UX2:
+ __set_bit(BTN_A, input_dev->keybit);
+ __set_bit(BTN_B, input_dev->keybit);
+ __set_bit(BTN_C, input_dev->keybit);
+ __set_bit(BTN_X, input_dev->keybit);
+ __set_bit(BTN_Y, input_dev->keybit);
+ __set_bit(BTN_Z, input_dev->keybit);
+ __set_bit(BTN_BASE, input_dev->keybit);
+ __set_bit(BTN_BASE2, input_dev->keybit);
+ /* fall through */
+
case WACOM_BEE:
__set_bit(BTN_8, input_dev->keybit);
__set_bit(BTN_9, input_dev->keybit);
@@ -1105,6 +1144,8 @@ static const struct wacom_features wacom_features_0xBA =
{ "Wacom Intuos4 8x13", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047, 63, INTUOS4L };
static const struct wacom_features wacom_features_0xBB =
{ "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047, 63, INTUOS4L };
+static const struct wacom_features wacom_features_0xBC =
+ { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40840, 25400, 2047, 63, INTUOS4 };
static const struct wacom_features wacom_features_0x3F =
{ "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023, 63, CINTIQ };
static const struct wacom_features wacom_features_0xC5 =
@@ -1113,6 +1154,8 @@ static const struct wacom_features wacom_features_0xC6 =
{ "Wacom Cintiq 12WX", WACOM_PKGLEN_INTUOS, 53020, 33440, 1023, 63, WACOM_BEE };
static const struct wacom_features wacom_features_0xC7 =
{ "Wacom DTU1931", WACOM_PKGLEN_GRAPHIRE, 37832, 30305, 511, 0, PL };
+static const struct wacom_features wacom_features_0xCC =
+ { "Wacom Cintiq 21UX2", WACOM_PKGLEN_INTUOS, 87200, 65600, 2047, 63, WACOM_21UX2 };
static const struct wacom_features wacom_features_0x90 =
{ "Wacom ISDv4 90", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC };
static const struct wacom_features wacom_features_0x93 =
@@ -1185,10 +1228,12 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xB9) },
{ USB_DEVICE_WACOM(0xBA) },
{ USB_DEVICE_WACOM(0xBB) },
+ { USB_DEVICE_WACOM(0xBC) },
{ USB_DEVICE_WACOM(0x3F) },
{ USB_DEVICE_WACOM(0xC5) },
{ USB_DEVICE_WACOM(0xC6) },
{ USB_DEVICE_WACOM(0xC7) },
+ { USB_DEVICE_WACOM(0xCC) },
{ USB_DEVICE_WACOM(0x90) },
{ USB_DEVICE_WACOM(0x93) },
{ USB_DEVICE_WACOM(0x9A) },
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index 063f1af3204f..854b92092dfc 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -50,6 +50,7 @@ enum {
INTUOS4S,
INTUOS4,
INTUOS4L,
+ WACOM_21UX2,
CINTIQ,
WACOM_BEE,
WACOM_MO,
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index b9f58ca82fd1..3b9d5e2105d7 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -156,7 +156,7 @@ config TOUCHSCREEN_FUJITSU
config TOUCHSCREEN_S3C2410
tristate "Samsung S3C2410/generic touchscreen input driver"
depends on ARCH_S3C2410 || SAMSUNG_DEV_TS
- select S3C24XX_ADC
+ select S3C_ADC
help
Say Y here if you have the s3c2410 touchscreen.
@@ -590,4 +590,17 @@ config TOUCHSCREEN_PCAP
To compile this driver as a module, choose M here: the
module will be called pcap_ts.
+
+config TOUCHSCREEN_TPS6507X
+ tristate "TPS6507x based touchscreens"
+ depends on I2C
+ help
+ Say Y here if you have a TPS6507x based touchscreen
+ controller.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tps6507x_ts.
+
endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 8ad36eef90a2..497964a7a214 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -46,3 +46,4 @@ obj-$(CONFIG_TOUCHSCREEN_WM97XX_ATMEL) += atmel-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o
+obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index 794d070c6900..4b32fb4704cd 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -812,10 +812,8 @@ static int __devinit ad7879_probe(struct i2c_client *client,
ts->bus = client;
error = ad7879_construct(client, ts);
- if (error) {
- i2c_set_clientdata(client, NULL);
+ if (error)
kfree(ts);
- }
return error;
}
@@ -825,7 +823,6 @@ static int __devexit ad7879_remove(struct i2c_client *client)
struct ad7879 *ts = dev_get_drvdata(&client->dev);
ad7879_destroy(client, ts);
- i2c_set_clientdata(client, NULL);
kfree(ts);
return 0;
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 532279cda0e4..a9fdf55c0238 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1163,8 +1163,8 @@ static int __devinit ads7846_probe(struct spi_device *spi)
ts->reg = regulator_get(&spi->dev, "vcc");
if (IS_ERR(ts->reg)) {
- dev_err(&spi->dev, "unable to get regulator: %ld\n",
- PTR_ERR(ts->reg));
+ err = PTR_ERR(ts->reg);
+ dev_err(&spi->dev, "unable to get regulator: %d\n", err);
goto err_free_gpio;
}
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 75f8b73010fa..7a3a916f84a8 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -238,7 +238,6 @@ err2:
input = NULL; /* so we don't try to free it below */
err1:
input_free_device(input);
- i2c_set_clientdata(client, NULL);
kfree(priv);
err0:
return err;
@@ -256,7 +255,6 @@ static int __devexit eeti_ts_remove(struct i2c_client *client)
enable_irq(priv->irq);
input_unregister_device(priv->input);
- i2c_set_clientdata(client, NULL);
kfree(priv);
return 0;
diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c
index ce8ab0269f6f..1fb0c2f06a44 100644
--- a/drivers/input/touchscreen/mcs5000_ts.c
+++ b/drivers/input/touchscreen/mcs5000_ts.c
@@ -256,7 +256,6 @@ static int __devexit mcs5000_ts_remove(struct i2c_client *client)
free_irq(client->irq, data);
input_unregister_device(data->input_dev);
kfree(data);
- i2c_set_clientdata(client, NULL);
return 0;
}
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index e0b7c834111d..6085d12fd561 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -173,7 +173,7 @@ static irqreturn_t stylus_irq(int irq, void *dev_id)
if (down)
s3c_adc_start(ts.client, 0, 1 << ts.shift);
else
- dev_info(ts.dev, "%s: count=%d\n", __func__, ts.count);
+ dev_dbg(ts.dev, "%s: count=%d\n", __func__, ts.count);
if (ts.features & FEAT_PEN_IRQ) {
/* Clear pen down/up interrupt */
@@ -413,6 +413,8 @@ static struct dev_pm_ops s3c_ts_pmops = {
#endif
static struct platform_device_id s3cts_driver_ids[] = {
+ { "s3c2410-ts", 0 },
+ { "s3c2440-ts", 0 },
{ "s3c64xx-ts", FEAT_PEN_IRQ },
{ }
};
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
new file mode 100644
index 000000000000..5b70a1419b4d
--- /dev/null
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -0,0 +1,396 @@
+/*
+ * drivers/input/touchscreen/tps6507x-ts.c
+ *
+ * Touchscreen driver for the tps6507x chip.
+ *
+ * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com)
+ *
+ * Credits:
+ *
+ * Using code from tsc2007, MtekVision Co., Ltd.
+ *
+ * For licensing details see kernel-base/COPYING
+ *
+ * TPS65070, TPS65073, TPS650731, and TPS650732 support
+ * a 10-bit touch screen interface.
+ */
+
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/tps6507x.h>
+#include <linux/input/tps6507x-ts.h>
+#include <linux/delay.h>
+
+#define TSC_DEFAULT_POLL_PERIOD 30 /* ms */
+#define TPS_DEFAULT_MIN_PRESSURE 0x30
+#define MAX_10BIT ((1 << 10) - 1)
+
+#define TPS6507X_ADCONFIG_CONVERT_TS (TPS6507X_ADCONFIG_AD_ENABLE | \
+ TPS6507X_ADCONFIG_START_CONVERSION | \
+ TPS6507X_ADCONFIG_INPUT_REAL_TSC)
+#define TPS6507X_ADCONFIG_POWER_DOWN_TS (TPS6507X_ADCONFIG_INPUT_REAL_TSC)
+
+struct ts_event {
+ u16 x;
+ u16 y;
+ u16 pressure;
+};
+
+struct tps6507x_ts {
+ struct input_dev *input_dev;
+ struct device *dev;
+ char phys[32];
+ struct workqueue_struct *wq;
+ struct delayed_work work;
+ unsigned polling; /* polling is active */
+ struct ts_event tc;
+ struct tps6507x_dev *mfd;
+ u16 model;
+ unsigned pendown;
+ int irq;
+ void (*clear_penirq)(void);
+ unsigned long poll_period; /* ms */
+ u16 min_pressure;
+ int vref; /* non-zero to leave vref on */
+};
+
+static int tps6507x_read_u8(struct tps6507x_ts *tsc, u8 reg, u8 *data)
+{
+ int err;
+
+ err = tsc->mfd->read_dev(tsc->mfd, reg, 1, data);
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tps6507x_write_u8(struct tps6507x_ts *tsc, u8 reg, u8 data)
+{
+ return tsc->mfd->write_dev(tsc->mfd, reg, 1, &data);
+}
+
+static s32 tps6507x_adc_conversion(struct tps6507x_ts *tsc,
+ u8 tsc_mode, u16 *value)
+{
+ s32 ret;
+ u8 adc_status;
+ u8 result;
+
+ /* Route input signal to A/D converter */
+
+ ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE, tsc_mode);
+ if (ret) {
+ dev_err(tsc->dev, "TSC mode read failed\n");
+ goto err;
+ }
+
+ /* Start A/D conversion */
+
+ ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG,
+ TPS6507X_ADCONFIG_CONVERT_TS);
+ if (ret) {
+ dev_err(tsc->dev, "ADC config write failed\n");
+ return ret;
+ }
+
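+ /* Poll ADCONFIG until the start-conversion bit clears, i.e. the result is ready */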
+ do {
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADCONFIG,
+ &adc_status);
+ if (ret) {
+ dev_err(tsc->dev, "ADC config read failed\n");
+ goto err;
+ }
+ } while (adc_status & TPS6507X_ADCONFIG_START_CONVERSION);
+
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_2, &result);
+ if (ret) {
+ dev_err(tsc->dev, "ADC result 2 read failed\n");
+ goto err;
+ }
+
+ *value = (result & TPS6507X_REG_ADRESULT_2_MASK) << 8;
+
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_1, &result);
+ if (ret) {
+ dev_err(tsc->dev, "ADC result 1 read failed\n");
+ goto err;
+ }
+
+ *value |= result;
+
+ dev_dbg(tsc->dev, "TSC channel %d = 0x%X\n", tsc_mode, *value);
+
+err:
+ return ret;
+}
+
+/* Need to call tps6507x_adc_standby() after using A/D converter for the
+ * touch screen interrupt to work properly.
+ */
+
+static s32 tps6507x_adc_standby(struct tps6507x_ts *tsc)
+{
+ s32 ret;
+ s32 loops = 0;
+ u8 val;
+
+ ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG,
+ TPS6507X_ADCONFIG_INPUT_TSC);
+ if (ret)
+ return ret;
+
+ ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE,
+ TPS6507X_TSCMODE_STANDBY);
+ if (ret)
+ return ret;
+
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val);
+ if (ret)
+ return ret;
+
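+ /* Wait for any pending touchscreen interrupt flag to clear before going to standby */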
+ while (val & TPS6507X_REG_TSC_INT) {
+ mdelay(10);
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val);
+ if (ret)
+ return ret;
+ loops++;
+ }
+
+ return ret;
+}
+
+static void tps6507x_ts_handler(struct work_struct *work)
+{
+ struct tps6507x_ts *tsc = container_of(work,
+ struct tps6507x_ts, work.work);
+ struct input_dev *input_dev = tsc->input_dev;
+ int pendown;
+ int schd;
+ int poll = 0;
+ s32 ret;
+
+ ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_PRESSURE,
+ &tsc->tc.pressure);
+ if (ret)
+ goto done;
+
+ pendown = tsc->tc.pressure > tsc->min_pressure;
+
+ if (unlikely(!pendown && tsc->pendown)) {
+ dev_dbg(tsc->dev, "UP\n");
+ input_report_key(input_dev, BTN_TOUCH, 0);
+ input_report_abs(input_dev, ABS_PRESSURE, 0);
+ input_sync(input_dev);
+ tsc->pendown = 0;
+ }
+
+ if (pendown) {
+
+ if (!tsc->pendown) {
+ dev_dbg(tsc->dev, "DOWN\n");
+ input_report_key(input_dev, BTN_TOUCH, 1);
+ } else
+ dev_dbg(tsc->dev, "still down\n");
+
+ ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_X_POSITION,
+ &tsc->tc.x);
+ if (ret)
+ goto done;
+
+ ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_Y_POSITION,
+ &tsc->tc.y);
+ if (ret)
+ goto done;
+
+ input_report_abs(input_dev, ABS_X, tsc->tc.x);
+ input_report_abs(input_dev, ABS_Y, tsc->tc.y);
+ input_report_abs(input_dev, ABS_PRESSURE, tsc->tc.pressure);
+ input_sync(input_dev);
+ tsc->pendown = 1;
+ poll = 1;
+ }
+
+done:
+ /* always poll if not using interrupts */
+ poll = 1;
+
+ if (poll) {
+ schd = queue_delayed_work(tsc->wq, &tsc->work,
+ msecs_to_jiffies(tsc->poll_period));
+ if (schd)
+ tsc->polling = 1;
+ else {
+ tsc->polling = 0;
+ dev_err(tsc->dev, "re-schedule failed");
+ }
+ } else
+ tsc->polling = 0;
+
+ ret = tps6507x_adc_standby(tsc);
+}
+
+static int tps6507x_ts_probe(struct platform_device *pdev)
+{
+ int error;
+ struct tps6507x_ts *tsc;
+ struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
+ struct touchscreen_init_data *init_data;
+ struct input_dev *input_dev;
+ struct tps6507x_board *tps_board;
+ int schd;
+
+ /**
+ * tps_board points to PMIC-related constants
+ * coming from the board-evm file.
+ */
+
+ tps_board = (struct tps6507x_board *)tps6507x_dev->dev->platform_data;
+
+ if (!tps_board) {
+ dev_err(tps6507x_dev->dev,
+ "Could not find tps6507x platform data\n");
+ return -EIO;
+ }
+
+ /**
+ * init_data points to the touchscreen init data
+ * coming from the board-evm file.
+ */
+
+ init_data = tps_board->tps6507x_ts_init_data;
+
+ tsc = kzalloc(sizeof(struct tps6507x_ts), GFP_KERNEL);
+ if (!tsc) {
+ dev_err(tps6507x_dev->dev, "failed to allocate driver data\n");
+ error = -ENOMEM;
+ goto err0;
+ }
+
+ tps6507x_dev->ts = tsc;
+ tsc->mfd = tps6507x_dev;
+ tsc->dev = tps6507x_dev->dev;
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(tsc->dev, "Failed to allocate input device.\n");
+ error = -ENOMEM;
+ goto err1;
+ }
+
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+ input_set_abs_params(input_dev, ABS_X, 0, MAX_10BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, MAX_10BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_10BIT, 0, 0);
+
+ input_dev->name = "TPS6507x Touchscreen";
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->dev.parent = tsc->dev;
+
+ snprintf(tsc->phys, sizeof(tsc->phys),
+ "%s/input0", dev_name(tsc->dev));
+ input_dev->phys = tsc->phys;
+
+ dev_dbg(tsc->dev, "device: %s\n", input_dev->phys);
+
+ input_set_drvdata(input_dev, tsc);
+
+ tsc->input_dev = input_dev;
+
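+ /* Touch data is gathered by polling from a dedicated workqueue; no interrupt line is used */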
+ INIT_DELAYED_WORK(&tsc->work, tps6507x_ts_handler);
+ tsc->wq = create_workqueue("TPS6507x Touchscreen");
+
+ if (init_data) {
+ tsc->poll_period = init_data->poll_period;
+ tsc->vref = init_data->vref;
+ tsc->min_pressure = init_data->min_pressure;
+ input_dev->id.vendor = init_data->vendor;
+ input_dev->id.product = init_data->product;
+ input_dev->id.version = init_data->version;
+ } else {
+ tsc->poll_period = TSC_DEFAULT_POLL_PERIOD;
+ tsc->min_pressure = TPS_DEFAULT_MIN_PRESSURE;
+ }
+
+ error = tps6507x_adc_standby(tsc);
+ if (error)
+ goto err2;
+
+ error = input_register_device(input_dev);
+ if (error)
+ goto err2;
+
+ schd = queue_delayed_work(tsc->wq, &tsc->work,
+ msecs_to_jiffies(tsc->poll_period));
+
+ if (schd)
+ tsc->polling = 1;
+ else {
+ tsc->polling = 0;
+ dev_err(tsc->dev, "schedule failed");
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ cancel_delayed_work_sync(&tsc->work);
+ destroy_workqueue(tsc->wq);
+ input_free_device(input_dev);
+err1:
+ kfree(tsc);
+ tps6507x_dev->ts = NULL;
+err0:
+ return error;
+}
+
+static int __devexit tps6507x_ts_remove(struct platform_device *pdev)
+{
+ struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
+ struct tps6507x_ts *tsc = tps6507x_dev->ts;
+ struct input_dev *input_dev = tsc->input_dev;
+
+ if (!tsc)
+ return 0;
+
+ cancel_delayed_work_sync(&tsc->work);
+ destroy_workqueue(tsc->wq);
+
+ input_free_device(input_dev);
+
+ tps6507x_dev->ts = NULL;
+ kfree(tsc);
+
+ return 0;
+}
+
+static struct platform_driver tps6507x_ts_driver = {
+ .driver = {
+ .name = "tps6507x-ts",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps6507x_ts_probe,
+ .remove = __devexit_p(tps6507x_ts_remove),
+};
+
+static int __init tps6507x_ts_init(void)
+{
+ return platform_driver_register(&tps6507x_ts_driver);
+}
+module_init(tps6507x_ts_init);
+
+static void __exit tps6507x_ts_exit(void)
+{
+ platform_driver_unregister(&tps6507x_ts_driver);
+}
+module_exit(tps6507x_ts_exit);
+
+MODULE_AUTHOR("Todd Fischer <todd.fischer@ridgerun.com>");
+MODULE_DESCRIPTION("TPS6507x - TouchScreen driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps6507x-tsc");
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index 769b479fcaa6..be23780e8a3e 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -347,8 +347,6 @@ static int __devexit tsc2007_remove(struct i2c_client *client)
struct tsc2007 *ts = i2c_get_clientdata(client);
struct tsc2007_platform_data *pdata = client->dev.platform_data;
- i2c_set_clientdata(client, NULL);
-
tsc2007_free_irq(ts);
if (pdata->exit_platform_hw)
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 29a8bbf3f086..567d57215c28 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -857,6 +857,11 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
if ((pkt[0] & 0xe0) != 0xe0)
return 0;
+ if (be16_to_cpu(packet->data_len) > 0xff)
+ packet->data_len = cpu_to_be16(be16_to_cpu(packet->data_len) - 0x100);
+ if (be16_to_cpu(packet->x_len) > 0xff)
+ packet->x_len = cpu_to_be16(be16_to_cpu(packet->x_len) - 0x80);
+
/* send ACK */
ret = usb_submit_urb(priv->ack, GFP_ATOMIC);
@@ -1112,7 +1117,7 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO
[DEVTYPE_NEXIO] = {
- .rept_size = 128,
+ .rept_size = 1024,
.irq_always = true,
.read_data = nexio_read_data,
.init = nexio_init,
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index ee5837522f5a..0cabe31f26df 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -787,8 +787,7 @@ capi_poll(struct file *file, poll_table * wait)
}
static int
-capi_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+capi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct capidev *cdev = file->private_data;
capi_ioctl_struct data;
@@ -981,6 +980,18 @@ register_out:
}
}
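+/* Run the old ioctl handler under the BKL now that the driver uses
+ * the ->unlocked_ioctl entry point instead of the locked ->ioctl.
+ */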
+static long
+capi_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = capi_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static int capi_open(struct inode *inode, struct file *file)
{
struct capidev *cdev;
@@ -1026,7 +1037,7 @@ static const struct file_operations capi_fops =
.read = capi_read,
.write = capi_write,
.poll = capi_poll,
- .ioctl = capi_ioctl,
+ .unlocked_ioctl = capi_unlocked_ioctl,
.open = capi_open,
.release = capi_release,
};
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index bd00dceacaf0..b054494df846 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -1020,12 +1020,12 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
if (cmd == AVMB1_ADDCARD) {
if ((retval = copy_from_user(&cdef, data,
sizeof(avmb1_carddef))))
- return retval;
+ return -EFAULT;
cdef.cardtype = AVM_CARDTYPE_B1;
} else {
if ((retval = copy_from_user(&cdef, data,
sizeof(avmb1_extcarddef))))
- return retval;
+ return -EFAULT;
}
cparams.port = cdef.port;
cparams.irq = cdef.irq;
@@ -1147,6 +1147,12 @@ load_unlock_out:
if (ctr->state == CAPI_CTR_DETECTED)
goto reset_unlock_out;
+ if (ctr->reset_ctr == NULL) {
+ printk(KERN_DEBUG "kcapi: reset: no reset function\n");
+ retval = -ESRCH;
+ goto reset_unlock_out;
+ }
+
ctr->reset_ctr(ctr);
retval = wait_on_ctr_state(ctr, CAPI_CTR_DETECTED);
@@ -1212,7 +1218,7 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
kcapi_carddef cdef;
if ((retval = copy_from_user(&cdef, data, sizeof(cdef))))
- return retval;
+ return -EFAULT;
cparams.port = cdef.port;
cparams.irq = cdef.irq;
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 964a55fb1486..8f78f15c8ef7 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -170,17 +170,6 @@ static inline void ignore_cstruct_param(struct cardstate *cs, _cstruct param,
}
/*
- * convert hex to binary
- */
-static inline u8 hex2bin(char c)
-{
- int result = c & 0x0f;
- if (c & 0x40)
- result += 9;
- return result;
-}
-
-/*
* convert an IE from Gigaset hex string to ETSI binary representation
* including length byte
* return value: result length, -1 on error
@@ -191,7 +180,7 @@ static int encode_ie(char *in, u8 *out, int maxlen)
while (*in) {
if (!isxdigit(in[0]) || !isxdigit(in[1]) || l >= maxlen)
return -1;
- out[++l] = (hex2bin(in[0]) << 4) + hex2bin(in[1]);
+ out[++l] = (hex_to_bin(in[0]) << 4) + hex_to_bin(in[1]);
in += 2;
}
out[0] = l;
@@ -933,30 +922,6 @@ void gigaset_isdn_stop(struct cardstate *cs)
*/
/*
- * load firmware
- */
-static int gigaset_load_firmware(struct capi_ctr *ctr, capiloaddata *data)
-{
- struct cardstate *cs = ctr->driverdata;
-
- /* AVM specific operation, not needed for Gigaset -- ignore */
- dev_notice(cs->dev, "load_firmware ignored\n");
-
- return 0;
-}
-
-/*
- * reset (deactivate) controller
- */
-static void gigaset_reset_ctr(struct capi_ctr *ctr)
-{
- struct cardstate *cs = ctr->driverdata;
-
- /* AVM specific operation, not needed for Gigaset -- ignore */
- dev_notice(cs->dev, "reset_ctr ignored\n");
-}
-
-/*
* register CAPI application
*/
static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl,
@@ -2213,8 +2178,8 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
iif->ctr.driverdata = cs;
strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name));
iif->ctr.driver_name = "gigaset";
- iif->ctr.load_firmware = gigaset_load_firmware;
- iif->ctr.reset_ctr = gigaset_reset_ctr;
+ iif->ctr.load_firmware = NULL;
+ iif->ctr.reset_ctr = NULL;
iif->ctr.register_appl = gigaset_register_appl;
iif->ctr.release_appl = gigaset_release_appl;
iif->ctr.send_message = gigaset_send_message;
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index b3b7e2879bac..8700474747e8 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -97,8 +97,10 @@ static int write_reg(struct hfcsusb *hw, __u8 reg, __u8 val)
hw->name, __func__, reg, val);
spin_lock(&hw->ctrl_lock);
- if (hw->ctrl_cnt >= HFC_CTRL_BUFSIZE)
+ if (hw->ctrl_cnt >= HFC_CTRL_BUFSIZE) {
+ spin_unlock(&hw->ctrl_lock);
return 1;
+ }
buf = &hw->ctrl_buff[hw->ctrl_in_idx];
buf->hfcs_reg = reg;
buf->reg_val = val;
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 0a3553df065f..54ae71a907f9 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -320,12 +320,12 @@ inittiger(struct tiger_hw *card)
return -ENOMEM;
}
for (i = 0; i < 2; i++) {
- card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_KERNEL);
+ card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_ATOMIC);
if (!card->bc[i].hsbuf) {
pr_info("%s: no B%d send buffer\n", card->name, i + 1);
return -ENOMEM;
}
- card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_KERNEL);
+ card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_ATOMIC);
if (!card->bc[i].hrbuf) {
pr_info("%s: no B%d recv buffer\n", card->name, i + 1);
return -ENOMEM;
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index 1925118122f8..8b0a7d86b30f 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -74,9 +74,10 @@ static struct pnp_device_id fcpnp_ids[] __devinitdata = {
.id = "AVM0900",
.driver_data = (unsigned long) "Fritz!Card PnP",
},
+ { .id = "" }
};
-MODULE_DEVICE_TABLE(isapnp, fcpnp_ids);
+MODULE_DEVICE_TABLE(pnp, fcpnp_ids);
#endif
static int protocol = 2; /* EURO-ISDN Default */
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 70044ee4b228..a44cdb492ea9 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1272,9 +1272,9 @@ isdn_poll(struct file *file, poll_table * wait)
static int
-isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
+isdn_ioctl(struct file *file, uint cmd, ulong arg)
{
- uint minor = iminor(inode);
+ uint minor = iminor(file->f_path.dentry->d_inode);
isdn_ctrl c;
int drvidx;
int chidx;
@@ -1722,6 +1722,18 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
#undef cfg
}
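+/* Old ioctl handler run under the BKL for the new ->unlocked_ioctl entry point */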
+static long
+isdn_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = isdn_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
/*
* Open the device code.
*/
@@ -1838,7 +1850,7 @@ static const struct file_operations isdn_fops =
.read = isdn_read,
.write = isdn_write,
.poll = isdn_poll,
- .ioctl = isdn_ioctl,
+ .unlocked_ioctl = isdn_unlocked_ioctl,
.open = isdn_open,
.release = isdn_close,
};
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index 8785004e85e0..81048b8ed8ad 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -24,6 +24,7 @@
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mISDNif.h>
+#include <linux/smp_lock.h>
#include "core.h"
static u_int *debug;
@@ -97,8 +98,6 @@ mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off)
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__,
filep, buf, (int)count, off);
- if (*off != filep->f_pos)
- return -ESPIPE;
if (list_empty(&dev->expired) && (dev->work == 0)) {
if (filep->f_flags & O_NONBLOCK)
@@ -215,9 +214,8 @@ unlock:
return ret;
}
-static int
-mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
- unsigned long arg)
+static long
+mISDN_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct mISDNtimerdev *dev = filep->private_data;
int id, tout, ret = 0;
@@ -226,6 +224,7 @@ mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p, %x, %lx)\n", __func__,
filep, cmd, arg);
+ lock_kernel();
switch (cmd) {
case IMADDTIMER:
if (get_user(tout, (int __user *)arg)) {
@@ -257,13 +256,14 @@ mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
default:
ret = -EINVAL;
}
+ unlock_kernel();
return ret;
}
static const struct file_operations mISDN_fops = {
.read = mISDN_read,
.poll = mISDN_poll,
- .ioctl = mISDN_ioctl,
+ .unlocked_ioctl = mISDN_ioctl,
.open = mISDN_open,
.release = mISDN_close,
};
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 505eb64c329c..81bf25e67ce1 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -21,7 +21,7 @@ comment "LED drivers"
config LEDS_88PM860X
tristate "LED Support for Marvell 88PM860x PMIC"
- depends on LEDS_CLASS && MFD_88PM860X
+ depends on MFD_88PM860X
help
This option enables support for on-chip LED drivers found on Marvell
Semiconductor 88PM8606 PMIC.
@@ -67,6 +67,16 @@ config LEDS_NET48XX
This option enables support for the Soekris net4801 and net4826 error
LED.
+config LEDS_NET5501
+ tristate "LED Support for Soekris net5501 series Error LED"
+ depends on LEDS_TRIGGERS
+ depends on X86 && LEDS_GPIO_PLATFORM && GPIO_CS5535
+ select LEDS_TRIGGER_DEFAULT_ON
+ default n
+ help
+ Add support for the Soekris net5501 board (detection, error LED
+ and GPIO).
+
config LEDS_FSG
tristate "LED Support for the Freecom FSG-3"
depends on MACH_FSG
@@ -285,6 +295,13 @@ config LEDS_DELL_NETBOOKS
This adds support for the Latitude 2100 and similar
notebooks that have an external LED.
+config LEDS_MC13783
+ tristate "LED Support for MC13783 PMIC"
+ depends on MFD_MC13783
+ help
+ This option enables support for on-chip LED drivers found
+ on Freescale Semiconductor MC13783 PMIC.
+
config LEDS_TRIGGERS
bool "LED Trigger support"
help
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 0cd8b9957380..2493de499374 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o
obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
+obj-$(CONFIG_LEDS_NET5501) += leds-net5501.o
obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
obj-$(CONFIG_LEDS_ALIX2) += leds-alix2.o
obj-$(CONFIG_LEDS_H1940) += leds-h1940.o
@@ -35,6 +36,7 @@ obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o
obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o
obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
obj-$(CONFIG_LEDS_DELL_NETBOOKS) += dell-led.o
+obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 69e7d86a5143..260660076507 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -74,7 +74,7 @@ static ssize_t led_max_brightness_show(struct device *dev,
static struct device_attribute led_class_attrs[] = {
__ATTR(brightness, 0644, led_brightness_show, led_brightness_store),
- __ATTR(max_brightness, 0644, led_max_brightness_show, NULL),
+ __ATTR(max_brightness, 0444, led_max_brightness_show, NULL),
#ifdef CONFIG_LEDS_TRIGGERS
__ATTR(trigger, 0644, led_trigger_show, led_trigger_store),
#endif
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 16a60c06c96c..b7677106cff8 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -256,8 +256,10 @@ static int pm860x_led_probe(struct platform_device *pdev)
if (pdev->dev.parent->platform_data) {
pm860x_pdata = pdev->dev.parent->platform_data;
pdata = pm860x_pdata->led;
- } else
- pdata = NULL;
+ } else {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -EINVAL;
+ }
data = kzalloc(sizeof(struct pm860x_led), GFP_KERNEL);
if (data == NULL)
@@ -268,8 +270,11 @@ static int pm860x_led_probe(struct platform_device *pdev)
data->i2c = (chip->id == CHIP_PM8606) ? chip->client : chip->companion;
data->iset = pdata->iset;
data->port = __check_device(pdata, data->name);
- if (data->port < 0)
+ if (data->port < 0) {
+ dev_err(&pdev->dev, "check device failed\n");
+ kfree(data);
return -EINVAL;
+ }
data->current_brightness = 0;
data->cdev.name = data->name;
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 286b501a3573..5dcdf9d69b3a 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -742,7 +742,6 @@ failed_unregister_dev_file:
for (i--; i >= 0; i--)
device_remove_file(&led->client->dev, bd2802_attributes[i]);
failed_free:
- i2c_set_clientdata(client, NULL);
kfree(led);
return ret;
@@ -759,7 +758,6 @@ static int __exit bd2802_remove(struct i2c_client *client)
bd2802_disable_adv_conf(led);
for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++)
device_remove_file(&led->client->dev, bd2802_attributes[i]);
- i2c_set_clientdata(client, NULL);
kfree(led);
return 0;
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index c6e4b772b757..cc22eeefa10b 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -26,7 +26,8 @@ struct gpio_led_data {
u8 new_level;
u8 can_sleep;
u8 active_low;
- int (*platform_gpio_blink_set)(unsigned gpio,
+ u8 blinking;
+ int (*platform_gpio_blink_set)(unsigned gpio, int state,
unsigned long *delay_on, unsigned long *delay_off);
};
@@ -35,7 +36,13 @@ static void gpio_led_work(struct work_struct *work)
struct gpio_led_data *led_dat =
container_of(work, struct gpio_led_data, work);
- gpio_set_value_cansleep(led_dat->gpio, led_dat->new_level);
+ if (led_dat->blinking) {
+ led_dat->platform_gpio_blink_set(led_dat->gpio,
+ led_dat->new_level,
+ NULL, NULL);
+ led_dat->blinking = 0;
+ } else
+ gpio_set_value_cansleep(led_dat->gpio, led_dat->new_level);
}
static void gpio_led_set(struct led_classdev *led_cdev,
@@ -60,8 +67,14 @@ static void gpio_led_set(struct led_classdev *led_cdev,
if (led_dat->can_sleep) {
led_dat->new_level = level;
schedule_work(&led_dat->work);
- } else
- gpio_set_value(led_dat->gpio, level);
+ } else {
+ if (led_dat->blinking) {
+ led_dat->platform_gpio_blink_set(led_dat->gpio, level,
+ NULL, NULL);
+ led_dat->blinking = 0;
+ } else
+ gpio_set_value(led_dat->gpio, level);
+ }
}
static int gpio_blink_set(struct led_classdev *led_cdev,
@@ -70,12 +83,14 @@ static int gpio_blink_set(struct led_classdev *led_cdev,
struct gpio_led_data *led_dat =
container_of(led_cdev, struct gpio_led_data, cdev);
- return led_dat->platform_gpio_blink_set(led_dat->gpio, delay_on, delay_off);
+ led_dat->blinking = 1;
+ return led_dat->platform_gpio_blink_set(led_dat->gpio, GPIO_LED_BLINK,
+ delay_on, delay_off);
}
static int __devinit create_gpio_led(const struct gpio_led *template,
struct gpio_led_data *led_dat, struct device *parent,
- int (*blink_set)(unsigned, unsigned long *, unsigned long *))
+ int (*blink_set)(unsigned, int, unsigned long *, unsigned long *))
{
int ret, state;
@@ -97,6 +112,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
led_dat->gpio = template->gpio;
led_dat->can_sleep = gpio_cansleep(template->gpio);
led_dat->active_low = template->active_low;
+ led_dat->blinking = 0;
if (blink_set) {
led_dat->platform_gpio_blink_set = blink_set;
led_dat->cdev.blink_set = gpio_blink_set;
@@ -113,7 +129,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
if (ret < 0)
goto err;
-
+
INIT_WORK(&led_dat->work, gpio_led_work);
ret = led_classdev_register(parent, &led_dat->cdev);
@@ -211,7 +227,7 @@ struct gpio_led_of_platform_data {
static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node, *child;
+ struct device_node *np = ofdev->dev.of_node, *child;
struct gpio_led_of_platform_data *pdata;
int count = 0, ret;
@@ -291,8 +307,8 @@ static struct of_platform_driver of_gpio_leds_driver = {
.driver = {
.name = "of_gpio_leds",
.owner = THIS_MODULE,
+ .of_match_table = of_gpio_leds_match,
},
- .match_table = of_gpio_leds_match,
.probe = of_gpio_leds_probe,
.remove = __devexit_p(of_gpio_leds_remove),
};
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 8d5ecceba181..9010c054615e 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -379,6 +379,7 @@ static int __devinit lp3944_probe(struct i2c_client *client,
{
struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data;
struct lp3944_data *data;
+ int err;
if (lp3944_pdata == NULL) {
dev_err(&client->dev, "no platform data\n");
@@ -401,9 +402,13 @@ static int __devinit lp3944_probe(struct i2c_client *client,
mutex_init(&data->lock);
- dev_info(&client->dev, "lp3944 enabled\n");
+ err = lp3944_configure(client, data, lp3944_pdata);
+ if (err < 0) {
+ kfree(data);
+ return err;
+ }
- lp3944_configure(client, data, lp3944_pdata);
+ dev_info(&client->dev, "lp3944 enabled\n");
return 0;
}
@@ -427,7 +432,6 @@ static int __devexit lp3944_remove(struct i2c_client *client)
}
kfree(data);
- i2c_set_clientdata(client, NULL);
return 0;
}
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
new file mode 100644
index 000000000000..f05bb08d0f09
--- /dev/null
+++ b/drivers/leds/leds-mc13783.c
@@ -0,0 +1,403 @@
+/*
+ * LEDs driver for Freescale MC13783
+ *
+ * Copyright (C) 2010 Philippe Rétornaz
+ *
+ * Based on leds-da903x:
+ * Copyright (C) 2008 Compulab, Ltd.
+ * Mike Rapoport <mike@compulab.co.il>
+ *
+ * Copyright (C) 2006-2008 Marvell International Ltd.
+ * Eric Miao <eric.miao@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/mc13783.h>
+#include <linux/slab.h>
+
+struct mc13783_led {
+ struct led_classdev cdev;
+ struct work_struct work;
+ struct mc13783 *master;
+ enum led_brightness new_brightness;
+ int id;
+};
+
+#define MC13783_REG_LED_CONTROL_0 51
+#define MC13783_LED_C0_ENABLE_BIT (1 << 0)
+#define MC13783_LED_C0_TRIODE_MD_BIT (1 << 7)
+#define MC13783_LED_C0_TRIODE_AD_BIT (1 << 8)
+#define MC13783_LED_C0_TRIODE_KP_BIT (1 << 9)
+#define MC13783_LED_C0_BOOST_BIT (1 << 10)
+#define MC13783_LED_C0_ABMODE_MASK 0x7
+#define MC13783_LED_C0_ABMODE 11
+#define MC13783_LED_C0_ABREF_MASK 0x3
+#define MC13783_LED_C0_ABREF 14
+
+#define MC13783_REG_LED_CONTROL_1 52
+#define MC13783_LED_C1_TC1HALF_BIT (1 << 18)
+
+#define MC13783_REG_LED_CONTROL_2 53
+#define MC13783_LED_C2_BL_P_MASK 0xf
+#define MC13783_LED_C2_MD_P 9
+#define MC13783_LED_C2_AD_P 13
+#define MC13783_LED_C2_KP_P 17
+#define MC13783_LED_C2_BL_C_MASK 0x7
+#define MC13783_LED_C2_MD_C 0
+#define MC13783_LED_C2_AD_C 3
+#define MC13783_LED_C2_KP_C 6
+
+#define MC13783_REG_LED_CONTROL_3 54
+#define MC13783_LED_C3_TC_P 6
+#define MC13783_LED_C3_TC_P_MASK 0x1f
+
+#define MC13783_REG_LED_CONTROL_4 55
+#define MC13783_REG_LED_CONTROL_5 56
+
+#define MC13783_LED_Cx_PERIOD 21
+#define MC13783_LED_Cx_PERIOD_MASK 0x3
+#define MC13783_LED_Cx_SLEWLIM_BIT (1 << 23)
+#define MC13783_LED_Cx_TRIODE_TC_BIT (1 << 23)
+#define MC13783_LED_Cx_TC_C_MASK 0x3
+
+static void mc13783_led_work(struct work_struct *work)
+{
+ struct mc13783_led *led = container_of(work, struct mc13783_led, work);
+ int reg = 0;
+ int mask = 0;
+ int value = 0;
+ int bank, off, shift;
+
+ switch (led->id) {
+ case MC13783_LED_MD:
+ reg = MC13783_REG_LED_CONTROL_2;
+ mask = MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_MD_P;
+ value = (led->new_brightness >> 4) << MC13783_LED_C2_MD_P;
+ break;
+ case MC13783_LED_AD:
+ reg = MC13783_REG_LED_CONTROL_2;
+ mask = MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_AD_P;
+ value = (led->new_brightness >> 4) << MC13783_LED_C2_AD_P;
+ break;
+ case MC13783_LED_KP:
+ reg = MC13783_REG_LED_CONTROL_2;
+ mask = MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_KP_P;
+ value = (led->new_brightness >> 4) << MC13783_LED_C2_KP_P;
+ break;
+ case MC13783_LED_R1:
+ case MC13783_LED_G1:
+ case MC13783_LED_B1:
+ case MC13783_LED_R2:
+ case MC13783_LED_G2:
+ case MC13783_LED_B2:
+ case MC13783_LED_R3:
+ case MC13783_LED_G3:
+ case MC13783_LED_B3:
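+ /* RGB LEDs are packed three per control register, one 5-bit duty-cycle field each */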
+ off = led->id - MC13783_LED_R1;
+ bank = off/3;
+ reg = MC13783_REG_LED_CONTROL_3 + off/3;
+ shift = (off - bank * 3) * 5 + MC13783_LED_C3_TC_P;
+ value = (led->new_brightness >> 3) << shift;
+ mask = MC13783_LED_C3_TC_P_MASK << shift;
+ break;
+ }
+
+ mc13783_lock(led->master);
+
+ mc13783_reg_rmw(led->master, reg, mask, value);
+
+ mc13783_unlock(led->master);
+}
+
+static void mc13783_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct mc13783_led *led;
+
+ led = container_of(led_cdev, struct mc13783_led, cdev);
+ led->new_brightness = value;
+ schedule_work(&led->work);
+}
+
+static int __devinit mc13783_led_setup(struct mc13783_led *led, int max_current)
+{
+ int shift = 0;
+ int mask = 0;
+ int value = 0;
+ int reg = 0;
+ int ret, bank;
+
+ switch (led->id) {
+ case MC13783_LED_MD:
+ shift = MC13783_LED_C2_MD_C;
+ mask = MC13783_LED_C2_BL_C_MASK;
+ value = max_current & MC13783_LED_C2_BL_C_MASK;
+ reg = MC13783_REG_LED_CONTROL_2;
+ break;
+ case MC13783_LED_AD:
+ shift = MC13783_LED_C2_AD_C;
+ mask = MC13783_LED_C2_BL_C_MASK;
+ value = max_current & MC13783_LED_C2_BL_C_MASK;
+ reg = MC13783_REG_LED_CONTROL_2;
+ break;
+ case MC13783_LED_KP:
+ shift = MC13783_LED_C2_KP_C;
+ mask = MC13783_LED_C2_BL_C_MASK;
+ value = max_current & MC13783_LED_C2_BL_C_MASK;
+ reg = MC13783_REG_LED_CONTROL_2;
+ break;
+ case MC13783_LED_R1:
+ case MC13783_LED_G1:
+ case MC13783_LED_B1:
+ case MC13783_LED_R2:
+ case MC13783_LED_G2:
+ case MC13783_LED_B2:
+ case MC13783_LED_R3:
+ case MC13783_LED_G3:
+ case MC13783_LED_B3:
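+ /* RGB LEDs are packed three per control register, one 2-bit current-limit field each */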
+ bank = (led->id - MC13783_LED_R1)/3;
+ reg = MC13783_REG_LED_CONTROL_3 + bank;
+ shift = ((led->id - MC13783_LED_R1) - bank * 3) * 2;
+ mask = MC13783_LED_Cx_TC_C_MASK;
+ value = max_current & MC13783_LED_Cx_TC_C_MASK;
+ break;
+ }
+
+ mc13783_lock(led->master);
+
+ ret = mc13783_reg_rmw(led->master, reg, mask << shift,
+ value << shift);
+
+ mc13783_unlock(led->master);
+ return ret;
+}
+
+static int __devinit mc13783_leds_prepare(struct platform_device *pdev)
+{
+ struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
+ int ret = 0;
+ int reg = 0;
+
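+ /* Program the global LED control registers (periods, slew limiting, triode/boost modes) from platform data */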
+ mc13783_lock(dev);
+
+ if (pdata->flags & MC13783_LED_TC1HALF)
+ reg |= MC13783_LED_C1_TC1HALF_BIT;
+
+ if (pdata->flags & MC13783_LED_SLEWLIMTC)
+ reg |= MC13783_LED_Cx_SLEWLIM_BIT;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_1, reg);
+ if (ret)
+ goto out;
+
+ reg = (pdata->bl_period & MC13783_LED_Cx_PERIOD_MASK) <<
+ MC13783_LED_Cx_PERIOD;
+
+ if (pdata->flags & MC13783_LED_SLEWLIMBL)
+ reg |= MC13783_LED_Cx_SLEWLIM_BIT;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_2, reg);
+ if (ret)
+ goto out;
+
+ reg = (pdata->tc1_period & MC13783_LED_Cx_PERIOD_MASK) <<
+ MC13783_LED_Cx_PERIOD;
+
+ if (pdata->flags & MC13783_LED_TRIODE_TC1)
+ reg |= MC13783_LED_Cx_TRIODE_TC_BIT;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_3, reg);
+ if (ret)
+ goto out;
+
+ reg = (pdata->tc2_period & MC13783_LED_Cx_PERIOD_MASK) <<
+ MC13783_LED_Cx_PERIOD;
+
+ if (pdata->flags & MC13783_LED_TRIODE_TC2)
+ reg |= MC13783_LED_Cx_TRIODE_TC_BIT;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_4, reg);
+ if (ret)
+ goto out;
+
+ reg = (pdata->tc3_period & MC13783_LED_Cx_PERIOD_MASK) <<
+ MC13783_LED_Cx_PERIOD;
+
+ if (pdata->flags & MC13783_LED_TRIODE_TC3)
+ reg |= MC13783_LED_Cx_TRIODE_TC_BIT;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_5, reg);
+ if (ret)
+ goto out;
+
+ reg = MC13783_LED_C0_ENABLE_BIT;
+ if (pdata->flags & MC13783_LED_TRIODE_MD)
+ reg |= MC13783_LED_C0_TRIODE_MD_BIT;
+ if (pdata->flags & MC13783_LED_TRIODE_AD)
+ reg |= MC13783_LED_C0_TRIODE_AD_BIT;
+ if (pdata->flags & MC13783_LED_TRIODE_KP)
+ reg |= MC13783_LED_C0_TRIODE_KP_BIT;
+ if (pdata->flags & MC13783_LED_BOOST_EN)
+ reg |= MC13783_LED_C0_BOOST_BIT;
+
+ reg |= (pdata->abmode & MC13783_LED_C0_ABMODE_MASK) <<
+ MC13783_LED_C0_ABMODE;
+ reg |= (pdata->abref & MC13783_LED_C0_ABREF_MASK) <<
+ MC13783_LED_C0_ABREF;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_0, reg);
+
+out:
+ mc13783_unlock(dev);
+ return ret;
+}
+
+static int __devinit mc13783_led_probe(struct platform_device *pdev)
+{
+ struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783_led_platform_data *led_cur;
+ struct mc13783_led *led, *led_dat;
+ int ret, i;
+ int init_led = 0;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+
+ if (pdata->num_leds < 1 || pdata->num_leds > MC13783_LED_MAX) {
+ dev_err(&pdev->dev, "Invalid led count %d\n", pdata->num_leds);
+ return -EINVAL;
+ }
+
+ led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+ if (led == NULL) {
+ dev_err(&pdev->dev, "failed to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ ret = mc13783_leds_prepare(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to init led driver\n");
+ goto err_free;
+ }
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ led_dat = &led[i];
+ led_cur = &pdata->led[i];
+
+ if (led_cur->id > MC13783_LED_MAX || led_cur->id < 0) {
+ dev_err(&pdev->dev, "invalid id %d\n", led_cur->id);
+ ret = -EINVAL;
+ goto err_register;
+ }
+
+ if (init_led & (1 << led_cur->id)) {
+ dev_err(&pdev->dev, "led %d already initialized\n",
+ led_cur->id);
+ ret = -EINVAL;
+ goto err_register;
+ }
+
+ init_led |= 1 << led_cur->id;
+ led_dat->cdev.name = led_cur->name;
+ led_dat->cdev.default_trigger = led_cur->default_trigger;
+ led_dat->cdev.brightness_set = mc13783_led_set;
+ led_dat->cdev.brightness = LED_OFF;
+ led_dat->id = led_cur->id;
+ led_dat->master = dev_get_drvdata(pdev->dev.parent);
+
+ INIT_WORK(&led_dat->work, mc13783_led_work);
+
+ ret = led_classdev_register(pdev->dev.parent, &led_dat->cdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register led %d\n",
+ led_dat->id);
+ goto err_register;
+ }
+
+ ret = mc13783_led_setup(led_dat, led_cur->max_current);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to init led %d\n",
+ led_dat->id);
+ i++;
+ goto err_register;
+ }
+ }
+
+ platform_set_drvdata(pdev, led);
+ return 0;
+
+err_register:
+ for (i = i - 1; i >= 0; i--) {
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+
+err_free:
+ kfree(led);
+ return ret;
+}
+
+static int __devexit mc13783_led_remove(struct platform_device *pdev)
+{
+ struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783_led *led = platform_get_drvdata(pdev);
+ struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
+ int i;
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+
+ mc13783_lock(dev);
+
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_0, 0);
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_1, 0);
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_2, 0);
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_3, 0);
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_4, 0);
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_5, 0);
+
+ mc13783_unlock(dev);
+
+ kfree(led);
+ return 0;
+}
+
+static struct platform_driver mc13783_led_driver = {
+ .driver = {
+ .name = "mc13783-led",
+ .owner = THIS_MODULE,
+ },
+ .probe = mc13783_led_probe,
+ .remove = __devexit_p(mc13783_led_remove),
+};
+
+static int __init mc13783_led_init(void)
+{
+ return platform_driver_register(&mc13783_led_driver);
+}
+module_init(mc13783_led_init);
+
+static void __exit mc13783_led_exit(void)
+{
+ platform_driver_unregister(&mc13783_led_driver);
+}
+module_exit(mc13783_led_exit);
+
+MODULE_DESCRIPTION("LEDs driver for Freescale MC13783 PMIC");
+MODULE_AUTHOR("Philippe Retornaz <philippe.retornaz@epfl.ch>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mc13783-led");
diff --git a/drivers/leds/leds-net5501.c b/drivers/leds/leds-net5501.c
new file mode 100644
index 000000000000..3063f591f0dc
--- /dev/null
+++ b/drivers/leds/leds-net5501.c
@@ -0,0 +1,94 @@
+/*
+ * Soekris board support code
+ *
+ * Copyright (C) 2008-2009 Tower Technologies
+ * Written by Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+
+#include <asm/geode.h>
+
+static struct gpio_led net5501_leds[] = {
+ {
+ .name = "error",
+ .gpio = 6,
+ .default_trigger = "default-on",
+ },
+};
+
+static struct gpio_led_platform_data net5501_leds_data = {
+ .num_leds = ARRAY_SIZE(net5501_leds),
+ .leds = net5501_leds,
+};
+
+static struct platform_device net5501_leds_dev = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev.platform_data = &net5501_leds_data,
+};
+
+static void __init init_net5501(void)
+{
+ platform_device_register(&net5501_leds_dev);
+}
+
+struct soekris_board {
+ u16 offset;
+ char *sig;
+ u8 len;
+ void (*init)(void);
+};
+
+static struct soekris_board __initdata boards[] = {
+ { 0xb7b, "net5501", 7, init_net5501 }, /* net5501 v1.33/1.33c */
+ { 0xb1f, "net5501", 7, init_net5501 }, /* net5501 v1.32i */
+};
+
+static int __init soekris_init(void)
+{
+ int i;
+ unsigned char *rombase, *bios;
+
+ if (!is_geode())
+ return 0;
+
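+ /* Map the top of the BIOS ROM and look for the comBIOS signature and board model string */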
+ rombase = ioremap(0xffff0000, 0xffff);
+ if (!rombase) {
+ printk(KERN_INFO "Soekris net5501 LED driver failed to get rombase");
+ return 0;
+ }
+
+ bios = rombase + 0x20; /* null terminated */
+
+ if (strncmp(bios, "comBIOS", 7))
+ goto unmap;
+
+ for (i = 0; i < ARRAY_SIZE(boards); i++) {
+ unsigned char *model = rombase + boards[i].offset;
+
+ if (strncmp(model, boards[i].sig, boards[i].len) == 0) {
+ printk(KERN_INFO "Soekris %s: %s\n", model, bios);
+
+ if (boards[i].init)
+ boards[i].init();
+ break;
+ }
+ }
+
+unmap:
+ iounmap(rombase);
+ return 0;
+}
+
+arch_initcall(soekris_init);
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 6682175fa9f7..43d08756d823 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -320,10 +320,8 @@ static int pca9532_probe(struct i2c_client *client,
mutex_init(&data->update_lock);
err = pca9532_configure(client, data, pca9532_pdata);
- if (err) {
+ if (err)
kfree(data);
- i2c_set_clientdata(client, NULL);
- }
return err;
}
@@ -351,7 +349,6 @@ static int pca9532_remove(struct i2c_client *client)
}
kfree(data);
- i2c_set_clientdata(client, NULL);
return 0;
}
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 8ff50f234190..66aa3e8e786f 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -342,7 +342,6 @@ exit:
}
kfree(pca955x);
- i2c_set_clientdata(client, NULL);
return err;
}
@@ -358,7 +357,6 @@ static int __devexit pca955x_remove(struct i2c_client *client)
}
kfree(pca955x);
- i2c_set_clientdata(client, NULL);
return 0;
}
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 51477ec71391..a688293abd0b 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -534,7 +534,7 @@ static int __init nas_gpio_init(void)
set_power_light_amber_noblink();
return 0;
out_err:
- for (; i >= 0; i--)
+ for (i--; i >= 0; i--)
unregister_nasgpio_led(i);
pci_unregister_driver(&nas_gpio_pci_driver);
return ret;
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 26a303a1d1ab..b6e7ddc09d76 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -39,14 +39,12 @@ static struct macio_chip *macio_on_hold;
static int macio_bus_match(struct device *dev, struct device_driver *drv)
{
- struct macio_dev * macio_dev = to_macio_device(dev);
- struct macio_driver * macio_drv = to_macio_driver(drv);
- const struct of_device_id * matches = macio_drv->match_table;
+ const struct of_device_id * matches = drv->of_match_table;
if (!matches)
return 0;
- return of_match_device(matches, &macio_dev->ofdev) != NULL;
+ return of_match_device(matches, dev) != NULL;
}
struct macio_dev *macio_dev_get(struct macio_dev *dev)
@@ -84,7 +82,7 @@ static int macio_device_probe(struct device *dev)
macio_dev_get(macio_dev);
- match = of_match_device(drv->match_table, &macio_dev->ofdev);
+ match = of_match_device(drv->driver.of_match_table, dev);
if (match)
error = drv->probe(macio_dev, match);
if (error)
@@ -248,7 +246,7 @@ static void macio_create_fixup_irq(struct macio_dev *dev, int index,
static void macio_add_missing_resources(struct macio_dev *dev)
{
- struct device_node *np = dev->ofdev.node;
+ struct device_node *np = dev->ofdev.dev.of_node;
unsigned int irq_base;
/* Gatwick has some missing interrupts on child nodes */
@@ -289,7 +287,7 @@ static void macio_add_missing_resources(struct macio_dev *dev)
static void macio_setup_interrupts(struct macio_dev *dev)
{
- struct device_node *np = dev->ofdev.node;
+ struct device_node *np = dev->ofdev.dev.of_node;
unsigned int irq;
int i = 0, j = 0;
@@ -317,7 +315,7 @@ static void macio_setup_interrupts(struct macio_dev *dev)
static void macio_setup_resources(struct macio_dev *dev,
struct resource *parent_res)
{
- struct device_node *np = dev->ofdev.node;
+ struct device_node *np = dev->ofdev.dev.of_node;
struct resource r;
int index;
@@ -373,9 +371,9 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
dev->bus = &chip->lbus;
dev->media_bay = in_bay;
- dev->ofdev.node = np;
- dev->ofdev.dma_mask = 0xffffffffUL;
- dev->ofdev.dev.dma_mask = &dev->ofdev.dma_mask;
+ dev->ofdev.dev.of_node = np;
+ dev->ofdev.archdata.dma_mask = 0xffffffffUL;
+ dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask;
dev->ofdev.dev.parent = parent;
dev->ofdev.dev.bus = &macio_bus_type;
dev->ofdev.dev.release = macio_release_dev;
@@ -494,9 +492,9 @@ static void macio_pci_add_devices(struct macio_chip *chip)
}
/* Add media bay devices if any */
- if (mbdev)
- for (np = NULL; (np = of_get_next_child(mbdev->ofdev.node, np))
- != NULL;) {
+ if (mbdev) {
+ pnode = mbdev->ofdev.dev.of_node;
+ for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) {
if (macio_skip_device(np))
continue;
of_node_get(np);
@@ -504,11 +502,12 @@ static void macio_pci_add_devices(struct macio_chip *chip)
mbdev, root_res) == NULL)
of_node_put(np);
}
+ }
/* Add serial ports if any */
if (sdev) {
- for (np = NULL; (np = of_get_next_child(sdev->ofdev.node, np))
- != NULL;) {
+ pnode = sdev->ofdev.dev.of_node;
+ for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) {
if (macio_skip_device(np))
continue;
of_node_get(np);
@@ -527,7 +526,6 @@ static void macio_pci_add_devices(struct macio_chip *chip)
int macio_register_driver(struct macio_driver *drv)
{
/* initialize common driver fields */
- drv->driver.name = drv->name;
drv->driver.bus = &macio_bus_type;
/* register with core */
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index 9e9453b58425..6999ce59fd10 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -9,7 +9,7 @@ field##_show (struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct macio_dev *mdev = to_macio_device (dev); \
- return sprintf (buf, format_string, mdev->ofdev.node->field); \
+ return sprintf (buf, format_string, mdev->ofdev.dev.of_node->field); \
}
static ssize_t
@@ -21,7 +21,7 @@ compatible_show (struct device *dev, struct device_attribute *attr, char *buf)
int length = 0;
of = &to_macio_device (dev)->ofdev;
- compat = of_get_property(of->node, "compatible", &cplen);
+ compat = of_get_property(of->dev.of_node, "compatible", &cplen);
if (!compat) {
*buf = '\0';
return 0;
@@ -58,7 +58,7 @@ static ssize_t devspec_show(struct device *dev,
struct of_device *ofdev;
ofdev = to_of_device(dev);
- return sprintf(buf, "%s\n", ofdev->node->full_name);
+ return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
}
macio_config_of_attr (name, "%s\n");
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 08002b88f342..2fd435bc542e 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -564,7 +564,7 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de
unsigned long base;
int i;
- ofnode = mdev->ofdev.node;
+ ofnode = mdev->ofdev.dev.of_node;
if (macio_resource_count(mdev) < 1)
return -ENODEV;
@@ -728,8 +728,10 @@ static struct of_device_id media_bay_match[] =
static struct macio_driver media_bay_driver =
{
- .name = "media-bay",
- .match_table = media_bay_match,
+ .driver = {
+ .name = "media-bay",
+ .of_match_table = media_bay_match,
+ },
.probe = media_bay_attach,
.suspend = media_bay_suspend,
.resume = media_bay_resume
diff --git a/drivers/macintosh/nvram.c b/drivers/macintosh/nvram.c
index c876349c32de..a271c8218d82 100644
--- a/drivers/macintosh/nvram.c
+++ b/drivers/macintosh/nvram.c
@@ -100,7 +100,7 @@ const struct file_operations nvram_fops = {
.llseek = nvram_llseek,
.read = read_nvram,
.write = write_nvram,
- .ioctl = nvram_ioctl,
+ .unlocked_ioctl = nvram_ioctl,
};
static struct miscdevice nvram_dev = {
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 7c54d80c4fb2..53cce3a5da23 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -375,7 +375,7 @@ static int __devinit rackmeter_probe(struct macio_dev* mdev,
pr_debug("rackmeter_probe()\n");
/* Get i2s-a node */
- while ((i2s = of_get_next_child(mdev->ofdev.node, i2s)) != NULL)
+ while ((i2s = of_get_next_child(mdev->ofdev.dev.of_node, i2s)) != NULL)
if (strcmp(i2s->name, "i2s-a") == 0)
break;
if (i2s == NULL) {
@@ -431,7 +431,7 @@ static int __devinit rackmeter_probe(struct macio_dev* mdev,
of_address_to_resource(i2s, 1, &rdma)) {
printk(KERN_ERR
"rackmeter: found match but lacks resources: %s",
- mdev->ofdev.node->full_name);
+ mdev->ofdev.dev.of_node->full_name);
rc = -ENXIO;
goto bail_free;
}
@@ -584,9 +584,11 @@ static struct of_device_id rackmeter_match[] = {
};
static struct macio_driver rackmeter_driver = {
- .name = "rackmeter",
- .owner = THIS_MODULE,
- .match_table = rackmeter_match,
+ .driver = {
+ .name = "rackmeter",
+ .owner = THIS_MODULE,
+ .of_match_table = rackmeter_match,
+ },
.probe = rackmeter_probe,
.remove = __devexit_p(rackmeter_remove),
.shutdown = rackmeter_shutdown,
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index c9da5c4c167d..2506c957712e 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -671,8 +671,11 @@ static const struct of_device_id smu_platform_match[] =
static struct of_platform_driver smu_of_platform_driver =
{
- .name = "smu",
- .match_table = smu_platform_match,
+ .driver = {
+ .name = "smu",
+ .owner = THIS_MODULE,
+ .of_match_table = smu_platform_match,
+ },
.probe = smu_platform_probe,
};
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index 16d82f17ae82..c42eeb43042d 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -182,7 +182,6 @@ remove_thermostat(struct i2c_client *client)
thermostat = NULL;
- i2c_set_clientdata(client, NULL);
kfree(th);
return 0;
@@ -400,7 +399,6 @@ static int probe_thermostat(struct i2c_client *client,
rc = read_reg(th, CONFIG_REG);
if (rc < 0) {
dev_err(&client->dev, "Thermostat failed to read config!\n");
- i2c_set_clientdata(client, NULL);
kfree(th);
return -ENODEV;
}
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index b18fa948f3d1..e60605bd0ea9 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -2215,7 +2215,7 @@ static int fcu_of_probe(struct of_device* dev, const struct of_device_id *match)
state = state_detached;
/* Lookup the fans in the device tree */
- fcu_lookup_fans(dev->node);
+ fcu_lookup_fans(dev->dev.of_node);
/* Add the driver */
return i2c_add_driver(&therm_pm72_driver);
@@ -2238,8 +2238,11 @@ static const struct of_device_id fcu_match[] =
static struct of_platform_driver fcu_of_platform_driver =
{
- .name = "temperature",
- .match_table = fcu_match,
+ .driver = {
+ .name = "temperature",
+ .owner = THIS_MODULE,
+ .of_match_table = fcu_match,
+ },
.probe = fcu_of_probe,
.remove = fcu_of_remove
};
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 0839770e4ec5..5c9367acf0cf 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -463,8 +463,11 @@ static const struct of_device_id therm_of_match[] = {{
};
static struct of_platform_driver therm_of_driver = {
- .name = "temperature",
- .match_table = therm_of_match,
+ .driver = {
+ .name = "temperature",
+ .owner = THIS_MODULE,
+ .of_match_table = therm_of_match,
+ },
.probe = therm_of_probe,
.remove = therm_of_remove,
};
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 42764849eb78..3d4fc0f7b00b 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2273,8 +2273,7 @@ static int register_pmu_pm_ops(void)
device_initcall(register_pmu_pm_ops);
#endif
-static int
-pmu_ioctl(struct inode * inode, struct file *filp,
+static int pmu_ioctl(struct file *filp,
u_int cmd, u_long arg)
{
__u32 __user *argp = (__u32 __user *)arg;
@@ -2337,11 +2336,23 @@ pmu_ioctl(struct inode * inode, struct file *filp,
return error;
}
+static long pmu_unlocked_ioctl(struct file *filp,
+ u_int cmd, u_long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = pmu_ioctl(filp, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static const struct file_operations pmu_device_fops = {
.read = pmu_read,
.write = pmu_write,
.poll = pmu_fpoll,
- .ioctl = pmu_ioctl,
+ .unlocked_ioctl = pmu_unlocked_ioctl,
.open = pmu_open,
.release = pmu_release,
};
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index d8257d35afde..647c6add2193 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -107,10 +107,8 @@ static int wf_lm75_probe(struct i2c_client *client,
i2c_set_clientdata(client, lm);
rc = wf_register_sensor(&lm->sens);
- if (rc) {
- i2c_set_clientdata(client, NULL);
+ if (rc)
kfree(lm);
- }
return rc;
}
@@ -216,7 +214,6 @@ static int wf_lm75_remove(struct i2c_client *client)
/* release sensor */
wf_unregister_sensor(&lm->sens);
- i2c_set_clientdata(client, NULL);
return 0;
}
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index b486eb929fde..8204113268f4 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -81,7 +81,6 @@ static int wf_max6690_probe(struct i2c_client *client,
rc = wf_register_sensor(&max->sens);
if (rc) {
- i2c_set_clientdata(client, NULL);
kfree(max);
}
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index e20330a28959..65a8ff3e1f8e 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -376,7 +376,6 @@ static int wf_sat_remove(struct i2c_client *client)
/* XXX TODO */
sat->i2c = NULL;
- i2c_set_clientdata(client, NULL);
return 0;
}
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index acb3a4e404ff..4a6feac8c94a 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -100,8 +100,8 @@ config MD_RAID1
If unsure, say Y.
config MD_RAID10
- tristate "RAID-10 (mirrored striping) mode (EXPERIMENTAL)"
- depends on BLK_DEV_MD && EXPERIMENTAL
+ tristate "RAID-10 (mirrored striping) mode"
+ depends on BLK_DEV_MD
---help---
RAID-10 provides a combination of striping (RAID-0) and
mirroring (RAID-1) with easier configuration and more flexible
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index f084249295d9..1742435ce3ae 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -505,7 +505,7 @@ void bitmap_update_sb(struct bitmap *bitmap)
return;
}
spin_unlock_irqrestore(&bitmap->lock, flags);
- sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
sb->events = cpu_to_le64(bitmap->mddev->events);
if (bitmap->mddev->events < bitmap->events_cleared) {
/* rocking back to read-only */
@@ -526,7 +526,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
if (!bitmap || !bitmap->sb_page)
return;
- sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic));
printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version));
@@ -575,7 +575,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
return err;
}
- sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
chunksize = le32_to_cpu(sb->chunksize);
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
@@ -661,7 +661,7 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
return 0;
}
spin_unlock_irqrestore(&bitmap->lock, flags);
- sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
old = le32_to_cpu(sb->state) & bits;
switch (op) {
case MASK_SET: sb->state |= cpu_to_le32(bits);
@@ -1292,9 +1292,14 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
if (!bitmap) return 0;
if (behind) {
+ int bw;
atomic_inc(&bitmap->behind_writes);
+ bw = atomic_read(&bitmap->behind_writes);
+ if (bw > bitmap->behind_writes_used)
+ bitmap->behind_writes_used = bw;
+
PRINTK(KERN_DEBUG "inc write-behind count %d/%d\n",
- atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
+ bw, bitmap->max_write_behind);
}
while (sectors) {
@@ -1351,7 +1356,8 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
{
if (!bitmap) return;
if (behind) {
- atomic_dec(&bitmap->behind_writes);
+ if (atomic_dec_and_test(&bitmap->behind_writes))
+ wake_up(&bitmap->behind_wait);
PRINTK(KERN_DEBUG "dec write-behind count %d/%d\n",
atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
}
@@ -1675,6 +1681,7 @@ int bitmap_create(mddev_t *mddev)
atomic_set(&bitmap->pending_writes, 0);
init_waitqueue_head(&bitmap->write_wait);
init_waitqueue_head(&bitmap->overflow_wait);
+ init_waitqueue_head(&bitmap->behind_wait);
bitmap->mddev = mddev;
@@ -1692,7 +1699,7 @@ int bitmap_create(mddev_t *mddev)
* and bypass the page cache, we must sync the file
* first.
*/
- vfs_fsync(file, file->f_dentry, 1);
+ vfs_fsync(file, 1);
}
/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
if (!mddev->bitmap_info.external)
@@ -2006,6 +2013,27 @@ static ssize_t can_clear_store(mddev_t *mddev, const char *buf, size_t len)
static struct md_sysfs_entry bitmap_can_clear =
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
+static ssize_t
+behind_writes_used_show(mddev_t *mddev, char *page)
+{
+ if (mddev->bitmap == NULL)
+ return sprintf(page, "0\n");
+ return sprintf(page, "%lu\n",
+ mddev->bitmap->behind_writes_used);
+}
+
+static ssize_t
+behind_writes_used_reset(mddev_t *mddev, const char *buf, size_t len)
+{
+ if (mddev->bitmap)
+ mddev->bitmap->behind_writes_used = 0;
+ return len;
+}
+
+static struct md_sysfs_entry max_backlog_used =
+__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
+ behind_writes_used_show, behind_writes_used_reset);
+
static struct attribute *md_bitmap_attrs[] = {
&bitmap_location.attr,
&bitmap_timeout.attr,
@@ -2013,6 +2041,7 @@ static struct attribute *md_bitmap_attrs[] = {
&bitmap_chunksize.attr,
&bitmap_metadata.attr,
&bitmap_can_clear.attr,
+ &max_backlog_used.attr,
NULL
};
struct attribute_group md_bitmap_group = {
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index cb821d76d1b4..3797dea4723a 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -227,6 +227,7 @@ struct bitmap {
int allclean;
atomic_t behind_writes;
+ unsigned long behind_writes_used; /* highest actual value at runtime */
/*
* the bitmap daemon - periodically wakes up and sweeps the bitmap
@@ -239,6 +240,7 @@ struct bitmap {
atomic_t pending_writes; /* pending writes to the bitmap file */
wait_queue_head_t write_wait;
wait_queue_head_t overflow_wait;
+ wait_queue_head_t behind_wait;
struct sysfs_dirent *sysfs_can_clear;
};
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 8e3850b98cca..1a8987884614 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -169,10 +169,9 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
conf->nfaults = n+1;
}
-static int make_request(struct request_queue *q, struct bio *bio)
+static int make_request(mddev_t *mddev, struct bio *bio)
{
- mddev_t *mddev = q->queuedata;
- conf_t *conf = (conf_t*)mddev->private;
+ conf_t *conf = mddev->private;
int failit = 0;
if (bio_data_dir(bio) == WRITE) {
@@ -225,7 +224,7 @@ static int make_request(struct request_queue *q, struct bio *bio)
static void status(struct seq_file *seq, mddev_t *mddev)
{
- conf_t *conf = (conf_t*)mddev->private;
+ conf_t *conf = mddev->private;
int n;
if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)
@@ -328,7 +327,7 @@ static int run(mddev_t *mddev)
static int stop(mddev_t *mddev)
{
- conf_t *conf = (conf_t *)mddev->private;
+ conf_t *conf = mddev->private;
kfree(conf);
mddev->private = NULL;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 09437e958235..7e0e057db9a7 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -159,7 +159,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
sector_t sectors;
if (j < 0 || j >= raid_disks || disk->rdev) {
- printk("linear: disk numbering problem. Aborting!\n");
+ printk(KERN_ERR "md/linear:%s: disk numbering problem. Aborting!\n",
+ mdname(mddev));
goto out;
}
@@ -187,7 +188,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
}
if (cnt != raid_disks) {
- printk("linear: not enough drives present. Aborting!\n");
+ printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n",
+ mdname(mddev));
goto out;
}
@@ -282,29 +284,21 @@ static int linear_stop (mddev_t *mddev)
rcu_barrier();
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
kfree(conf);
+ mddev->private = NULL;
return 0;
}
-static int linear_make_request (struct request_queue *q, struct bio *bio)
+static int linear_make_request (mddev_t *mddev, struct bio *bio)
{
- const int rw = bio_data_dir(bio);
- mddev_t *mddev = q->queuedata;
dev_info_t *tmp_dev;
sector_t start_sector;
- int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
md_barrier_request(mddev, bio);
return 0;
}
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
rcu_read_lock();
tmp_dev = which_dev(mddev, bio->bi_sector);
start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
@@ -314,12 +308,14 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
|| (bio->bi_sector < start_sector))) {
char b[BDEVNAME_SIZE];
- printk("linear_make_request: Sector %llu out of bounds on "
- "dev %s: %llu sectors, offset %llu\n",
- (unsigned long long)bio->bi_sector,
- bdevname(tmp_dev->rdev->bdev, b),
- (unsigned long long)tmp_dev->rdev->sectors,
- (unsigned long long)start_sector);
+ printk(KERN_ERR
+ "md/linear:%s: make_request: Sector %llu out of bounds on "
+ "dev %s: %llu sectors, offset %llu\n",
+ mdname(mddev),
+ (unsigned long long)bio->bi_sector,
+ bdevname(tmp_dev->rdev->bdev, b),
+ (unsigned long long)tmp_dev->rdev->sectors,
+ (unsigned long long)start_sector);
rcu_read_unlock();
bio_io_error(bio);
return 0;
@@ -336,9 +332,9 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
bp = bio_split(bio, end_sector - bio->bi_sector);
- if (linear_make_request(q, &bp->bio1))
+ if (linear_make_request(mddev, &bp->bio1))
generic_make_request(&bp->bio1);
- if (linear_make_request(q, &bp->bio2))
+ if (linear_make_request(mddev, &bp->bio2))
generic_make_request(&bp->bio2);
bio_pair_release(bp);
return 0;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index a9fd491796ac..46b3a044eadf 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -215,8 +215,11 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
*/
static int md_make_request(struct request_queue *q, struct bio *bio)
{
+ const int rw = bio_data_dir(bio);
mddev_t *mddev = q->queuedata;
int rv;
+ int cpu;
+
if (mddev == NULL || mddev->pers == NULL) {
bio_io_error(bio);
return 0;
@@ -237,13 +240,27 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
}
atomic_inc(&mddev->active_io);
rcu_read_unlock();
- rv = mddev->pers->make_request(q, bio);
+
+ rv = mddev->pers->make_request(mddev, bio);
+
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
+
if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
wake_up(&mddev->sb_wait);
return rv;
}
+/* mddev_suspend makes sure no new requests are submitted
+ * to the device, and that any requests that have been submitted
+ * are completely handled.
+ * Once ->stop is called and completes, the module will be completely
+ * unused.
+ */
static void mddev_suspend(mddev_t *mddev)
{
BUG_ON(mddev->suspended);
@@ -251,13 +268,6 @@ static void mddev_suspend(mddev_t *mddev)
synchronize_rcu();
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
- /* we now know that no code is executing in the personality module,
- * except possibly the tail end of a ->bi_end_io function, but that
- * is certain to complete before the module has a chance to get
- * unloaded
- */
}
static void mddev_resume(mddev_t *mddev)
@@ -344,7 +354,7 @@ static void md_submit_barrier(struct work_struct *ws)
bio_endio(bio, 0);
else {
bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
- if (mddev->pers->make_request(mddev->queue, bio))
+ if (mddev->pers->make_request(mddev, bio))
generic_make_request(bio);
mddev->barrier = POST_REQUEST_BARRIER;
submit_barriers(mddev);
@@ -406,6 +416,27 @@ static void mddev_put(mddev_t *mddev)
spin_unlock(&all_mddevs_lock);
}
+static void mddev_init(mddev_t *mddev)
+{
+ mutex_init(&mddev->open_mutex);
+ mutex_init(&mddev->reconfig_mutex);
+ mutex_init(&mddev->bitmap_info.mutex);
+ INIT_LIST_HEAD(&mddev->disks);
+ INIT_LIST_HEAD(&mddev->all_mddevs);
+ init_timer(&mddev->safemode_timer);
+ atomic_set(&mddev->active, 1);
+ atomic_set(&mddev->openers, 0);
+ atomic_set(&mddev->active_io, 0);
+ spin_lock_init(&mddev->write_lock);
+ atomic_set(&mddev->flush_pending, 0);
+ init_waitqueue_head(&mddev->sb_wait);
+ init_waitqueue_head(&mddev->recovery_wait);
+ mddev->reshape_position = MaxSector;
+ mddev->resync_min = 0;
+ mddev->resync_max = MaxSector;
+ mddev->level = LEVEL_NONE;
+}
+
static mddev_t * mddev_find(dev_t unit)
{
mddev_t *mddev, *new = NULL;
@@ -472,23 +503,7 @@ static mddev_t * mddev_find(dev_t unit)
else
new->md_minor = MINOR(unit) >> MdpMinorShift;
- mutex_init(&new->open_mutex);
- mutex_init(&new->reconfig_mutex);
- mutex_init(&new->bitmap_info.mutex);
- INIT_LIST_HEAD(&new->disks);
- INIT_LIST_HEAD(&new->all_mddevs);
- init_timer(&new->safemode_timer);
- atomic_set(&new->active, 1);
- atomic_set(&new->openers, 0);
- atomic_set(&new->active_io, 0);
- spin_lock_init(&new->write_lock);
- atomic_set(&new->flush_pending, 0);
- init_waitqueue_head(&new->sb_wait);
- init_waitqueue_head(&new->recovery_wait);
- new->reshape_position = MaxSector;
- new->resync_min = 0;
- new->resync_max = MaxSector;
- new->level = LEVEL_NONE;
+ mddev_init(new);
goto retry;
}
@@ -508,9 +523,36 @@ static inline int mddev_trylock(mddev_t * mddev)
return mutex_trylock(&mddev->reconfig_mutex);
}
-static inline void mddev_unlock(mddev_t * mddev)
+static struct attribute_group md_redundancy_group;
+
+static void mddev_unlock(mddev_t * mddev)
{
- mutex_unlock(&mddev->reconfig_mutex);
+ if (mddev->to_remove) {
+ /* These cannot be removed under reconfig_mutex as
+ * an access to the files will try to take reconfig_mutex
+ * while holding the file unremovable, which leads to
+ * a deadlock.
+ * So hold open_mutex instead - we are allowed to take
+ * it while holding reconfig_mutex, and md_run can
+ * use it to wait for the remove to complete.
+ */
+ struct attribute_group *to_remove = mddev->to_remove;
+ mddev->to_remove = NULL;
+ mutex_lock(&mddev->open_mutex);
+ mutex_unlock(&mddev->reconfig_mutex);
+
+ if (to_remove != &md_redundancy_group)
+ sysfs_remove_group(&mddev->kobj, to_remove);
+ if (mddev->pers == NULL ||
+ mddev->pers->sync_request == NULL) {
+ sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+ if (mddev->sysfs_action)
+ sysfs_put(mddev->sysfs_action);
+ mddev->sysfs_action = NULL;
+ }
+ mutex_unlock(&mddev->open_mutex);
+ } else
+ mutex_unlock(&mddev->reconfig_mutex);
md_wakeup_thread(mddev->thread);
}
@@ -1029,10 +1071,13 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->bitmap_info.default_offset;
} else if (mddev->pers == NULL) {
- /* Insist on good event counter while assembling */
+ /* Insist on good event counter while assembling, except
+ * for spares (which don't need an event count) */
++ev1;
- if (ev1 < mddev->events)
- return -EINVAL;
+ if (sb->disks[rdev->desc_nr].state & (
+ (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
+ if (ev1 < mddev->events)
+ return -EINVAL;
} else if (mddev->bitmap) {
/* if adding to array with a bitmap, then we can accept an
* older device ... but not too old.
@@ -1428,10 +1473,14 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
}
} else if (mddev->pers == NULL) {
- /* Insist of good event counter while assembling */
+ /* Insist on good event counter while assembling, except for
+ * spares (which don't need an event count) */
++ev1;
- if (ev1 < mddev->events)
- return -EINVAL;
+ if (rdev->desc_nr >= 0 &&
+ rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
+ le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
+ if (ev1 < mddev->events)
+ return -EINVAL;
} else if (mddev->bitmap) {
/* If adding to array with a bitmap, then we can accept an
* older device, but not too old.
@@ -2047,7 +2096,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
if (rdev->sb_events == mddev->events ||
(nospares &&
rdev->raid_disk < 0 &&
- (rdev->sb_events&1)==0 &&
rdev->sb_events+1 == mddev->events)) {
/* Don't update this superblock */
rdev->sb_loaded = 2;
@@ -2100,28 +2148,14 @@ repeat:
* and 'events' is odd, we can roll back to the previous clean state */
if (nospares
&& (mddev->in_sync && mddev->recovery_cp == MaxSector)
- && (mddev->events & 1)
- && mddev->events != 1)
+ && mddev->can_decrease_events
+ && mddev->events != 1) {
mddev->events--;
- else {
+ mddev->can_decrease_events = 0;
+ } else {
/* otherwise we have to go forward and ... */
mddev->events ++;
- if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
- /* .. if the array isn't clean, an 'even' event must also go
- * to spares. */
- if ((mddev->events&1)==0) {
- nospares = 0;
- sync_req = 2; /* force a second update to get the
- * even/odd in sync */
- }
- } else {
- /* otherwise an 'odd' event must go to spares */
- if ((mddev->events&1)) {
- nospares = 0;
- sync_req = 2; /* force a second update to get the
- * even/odd in sync */
- }
- }
+ mddev->can_decrease_events = nospares;
}
if (!mddev->events) {
@@ -2365,6 +2399,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
return err;
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_remove_link(&rdev->mddev->kobj, nm);
+ rdev->raid_disk = -1;
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
} else if (rdev->mddev->pers) {
@@ -2780,8 +2815,9 @@ static void analyze_sbs(mddev_t * mddev)
i = 0;
rdev_for_each(rdev, tmp, mddev) {
- if (rdev->desc_nr >= mddev->max_disks ||
- i > mddev->max_disks) {
+ if (mddev->max_disks &&
+ (rdev->desc_nr >= mddev->max_disks ||
+ i > mddev->max_disks)) {
printk(KERN_WARNING
"md: %s: %s: only %d devices permitted\n",
mdname(mddev), bdevname(rdev->bdev, b),
@@ -2897,9 +2933,10 @@ level_show(mddev_t *mddev, char *page)
static ssize_t
level_store(mddev_t *mddev, const char *buf, size_t len)
{
- char level[16];
+ char clevel[16];
ssize_t rv = len;
struct mdk_personality *pers;
+ long level;
void *priv;
mdk_rdev_t *rdev;
@@ -2932,19 +2969,22 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
}
/* Now find the new personality */
- if (len == 0 || len >= sizeof(level))
+ if (len == 0 || len >= sizeof(clevel))
return -EINVAL;
- strncpy(level, buf, len);
- if (level[len-1] == '\n')
+ strncpy(clevel, buf, len);
+ if (clevel[len-1] == '\n')
len--;
- level[len] = 0;
+ clevel[len] = 0;
+ if (strict_strtol(clevel, 10, &level))
+ level = LEVEL_NONE;
- request_module("md-%s", level);
+ if (request_module("md-%s", clevel) != 0)
+ request_module("md-level-%s", clevel);
spin_lock(&pers_lock);
- pers = find_pers(LEVEL_NONE, level);
+ pers = find_pers(level, clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
- printk(KERN_WARNING "md: personality %s not loaded\n", level);
+ printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
return -EINVAL;
}
spin_unlock(&pers_lock);
@@ -2957,7 +2997,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
if (!pers->takeover) {
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
- mdname(mddev), level);
+ mdname(mddev), clevel);
return -EINVAL;
}
@@ -2973,13 +3013,44 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
mddev->delta_disks = 0;
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s would not accept array\n",
- mdname(mddev), level);
+ mdname(mddev), clevel);
return PTR_ERR(priv);
}
/* Looks like we have a winner */
mddev_suspend(mddev);
mddev->pers->stop(mddev);
+
+ if (mddev->pers->sync_request == NULL &&
+ pers->sync_request != NULL) {
+ /* need to add the md_redundancy_group */
+ if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
+ printk(KERN_WARNING
+ "md: cannot register extra attributes for %s\n",
+ mdname(mddev));
+ mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
+ }
+ if (mddev->pers->sync_request != NULL &&
+ pers->sync_request == NULL) {
+ /* need to remove the md_redundancy_group */
+ if (mddev->to_remove == NULL)
+ mddev->to_remove = &md_redundancy_group;
+ }
+
+ if (mddev->pers->sync_request == NULL &&
+ mddev->external) {
+ /* We are converting from a no-redundancy array
+ * to a redundancy array and metadata is managed
+ * externally so we need to be sure that writes
+ * won't block due to a need to transition
+ * clean->dirty
+ * until external management is started.
+ */
+ mddev->in_sync = 0;
+ mddev->safemode_delay = 0;
+ mddev->safemode = 0;
+ }
+
module_put(mddev->pers->owner);
/* Invalidate devices that are now superfluous */
list_for_each_entry(rdev, &mddev->disks, same_set)
@@ -2994,11 +3065,20 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = mddev->new_chunk_sectors;
mddev->delta_disks = 0;
+ if (mddev->pers->sync_request == NULL) {
+ /* this is now an array without redundancy, so
+ * it must always be in_sync
+ */
+ mddev->in_sync = 1;
+ del_timer_sync(&mddev->safemode_timer);
+ }
pers->run(mddev);
mddev_resume(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
+ sysfs_notify(&mddev->kobj, NULL, "level");
+ md_new_event(mddev);
return rv;
}
@@ -3237,6 +3317,7 @@ array_state_show(mddev_t *mddev, char *page)
}
static int do_md_stop(mddev_t * mddev, int ro, int is_open);
+static int md_set_readonly(mddev_t * mddev, int is_open);
static int do_md_run(mddev_t * mddev);
static int restart_array(mddev_t *mddev);
@@ -3267,7 +3348,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
break; /* not supported yet */
case readonly:
if (mddev->pers)
- err = do_md_stop(mddev, 1, 0);
+ err = md_set_readonly(mddev, 0);
else {
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
@@ -3277,7 +3358,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
case read_auto:
if (mddev->pers) {
if (mddev->ro == 0)
- err = do_md_stop(mddev, 1, 0);
+ err = md_set_readonly(mddev, 0);
else if (mddev->ro == 1)
err = restart_array(mddev);
if (err == 0) {
@@ -4082,15 +4163,6 @@ static void mddev_delayed_delete(struct work_struct *ws)
{
mddev_t *mddev = container_of(ws, mddev_t, del_work);
- if (mddev->private) {
- sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
- if (mddev->private != (void*)1)
- sysfs_remove_group(&mddev->kobj, mddev->private);
- if (mddev->sysfs_action)
- sysfs_put(mddev->sysfs_action);
- mddev->sysfs_action = NULL;
- mddev->private = NULL;
- }
sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
@@ -4234,11 +4306,10 @@ static void md_safemode_timeout(unsigned long data)
static int start_dirty_degraded;
-static int do_md_run(mddev_t * mddev)
+static int md_run(mddev_t *mddev)
{
int err;
mdk_rdev_t *rdev;
- struct gendisk *disk;
struct mdk_personality *pers;
if (list_empty(&mddev->disks))
@@ -4248,6 +4319,13 @@ static int do_md_run(mddev_t * mddev)
if (mddev->pers)
return -EBUSY;
+ /* These two calls synchronise us with the
+ * sysfs_remove_group calls in mddev_unlock,
+ * so they must have completed.
+ */
+ mutex_lock(&mddev->open_mutex);
+ mutex_unlock(&mddev->open_mutex);
+
/*
* Analyze all RAID superblock(s)
*/
@@ -4296,8 +4374,6 @@ static int do_md_run(mddev_t * mddev)
sysfs_notify_dirent(rdev->sysfs_state);
}
- disk = mddev->gendisk;
-
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
if (!pers || !try_module_get(pers->owner)) {
@@ -4425,22 +4501,32 @@ static int do_md_run(mddev_t * mddev)
if (mddev->flags)
md_update_sb(mddev, 0);
- set_capacity(disk, mddev->array_sectors);
-
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
- revalidate_disk(mddev->gendisk);
- mddev->changed = 1;
md_new_event(mddev);
sysfs_notify_dirent(mddev->sysfs_state);
if (mddev->sysfs_action)
sysfs_notify_dirent(mddev->sysfs_action);
sysfs_notify(&mddev->kobj, NULL, "degraded");
- kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
return 0;
}
+static int do_md_run(mddev_t *mddev)
+{
+ int err;
+
+ err = md_run(mddev);
+ if (err)
+ goto out;
+
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
+ kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
+out:
+ return err;
+}
+
static int restart_array(mddev_t *mddev)
{
struct gendisk *disk = mddev->gendisk;
@@ -4491,9 +4577,110 @@ void restore_bitmap_write_access(struct file *file)
spin_unlock(&inode->i_lock);
}
+static void md_clean(mddev_t *mddev)
+{
+ mddev->array_sectors = 0;
+ mddev->external_size = 0;
+ mddev->dev_sectors = 0;
+ mddev->raid_disks = 0;
+ mddev->recovery_cp = 0;
+ mddev->resync_min = 0;
+ mddev->resync_max = MaxSector;
+ mddev->reshape_position = MaxSector;
+ mddev->external = 0;
+ mddev->persistent = 0;
+ mddev->level = LEVEL_NONE;
+ mddev->clevel[0] = 0;
+ mddev->flags = 0;
+ mddev->ro = 0;
+ mddev->metadata_type[0] = 0;
+ mddev->chunk_sectors = 0;
+ mddev->ctime = mddev->utime = 0;
+ mddev->layout = 0;
+ mddev->max_disks = 0;
+ mddev->events = 0;
+ mddev->can_decrease_events = 0;
+ mddev->delta_disks = 0;
+ mddev->new_level = LEVEL_NONE;
+ mddev->new_layout = 0;
+ mddev->new_chunk_sectors = 0;
+ mddev->curr_resync = 0;
+ mddev->resync_mismatches = 0;
+ mddev->suspend_lo = mddev->suspend_hi = 0;
+ mddev->sync_speed_min = mddev->sync_speed_max = 0;
+ mddev->recovery = 0;
+ mddev->in_sync = 0;
+ mddev->degraded = 0;
+ mddev->barriers_work = 0;
+ mddev->safemode = 0;
+ mddev->bitmap_info.offset = 0;
+ mddev->bitmap_info.default_offset = 0;
+ mddev->bitmap_info.chunksize = 0;
+ mddev->bitmap_info.daemon_sleep = 0;
+ mddev->bitmap_info.max_write_behind = 0;
+}
+
+static void md_stop_writes(mddev_t *mddev)
+{
+ if (mddev->sync_thread) {
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_unregister_thread(mddev->sync_thread);
+ mddev->sync_thread = NULL;
+ }
+
+ del_timer_sync(&mddev->safemode_timer);
+
+ bitmap_flush(mddev);
+ md_super_wait(mddev);
+
+ if (!mddev->in_sync || mddev->flags) {
+ /* mark array as shutdown cleanly */
+ mddev->in_sync = 1;
+ md_update_sb(mddev, 1);
+ }
+}
+
+static void md_stop(mddev_t *mddev)
+{
+ md_stop_writes(mddev);
+
+ mddev->pers->stop(mddev);
+ if (mddev->pers->sync_request && mddev->to_remove == NULL)
+ mddev->to_remove = &md_redundancy_group;
+ module_put(mddev->pers->owner);
+ mddev->pers = NULL;
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+}
+
+static int md_set_readonly(mddev_t *mddev, int is_open)
+{
+ int err = 0;
+ mutex_lock(&mddev->open_mutex);
+ if (atomic_read(&mddev->openers) > is_open) {
+ printk("md: %s still in use.\n",mdname(mddev));
+ err = -EBUSY;
+ goto out;
+ }
+ if (mddev->pers) {
+ md_stop_writes(mddev);
+
+ err = -ENXIO;
+ if (mddev->ro==1)
+ goto out;
+ mddev->ro = 1;
+ set_disk_ro(mddev->gendisk, 1);
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ sysfs_notify_dirent(mddev->sysfs_state);
+ err = 0;
+ }
+out:
+ mutex_unlock(&mddev->open_mutex);
+ return err;
+}
+
/* mode:
* 0 - completely stop and dis-assemble array
- * 1 - switch to readonly
* 2 - stop but do not disassemble array
*/
static int do_md_stop(mddev_t * mddev, int mode, int is_open)
@@ -4508,64 +4695,32 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
err = -EBUSY;
} else if (mddev->pers) {
- if (mddev->sync_thread) {
- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_unregister_thread(mddev->sync_thread);
- mddev->sync_thread = NULL;
- }
-
- del_timer_sync(&mddev->safemode_timer);
+ if (mddev->ro)
+ set_disk_ro(disk, 0);
- switch(mode) {
- case 1: /* readonly */
- err = -ENXIO;
- if (mddev->ro==1)
- goto out;
- mddev->ro = 1;
- break;
- case 0: /* disassemble */
- case 2: /* stop */
- bitmap_flush(mddev);
- md_super_wait(mddev);
- if (mddev->ro)
- set_disk_ro(disk, 0);
+ md_stop(mddev);
+ mddev->queue->merge_bvec_fn = NULL;
+ mddev->queue->unplug_fn = NULL;
+ mddev->queue->backing_dev_info.congested_fn = NULL;
- mddev->pers->stop(mddev);
- mddev->queue->merge_bvec_fn = NULL;
- mddev->queue->unplug_fn = NULL;
- mddev->queue->backing_dev_info.congested_fn = NULL;
- module_put(mddev->pers->owner);
- if (mddev->pers->sync_request && mddev->private == NULL)
- mddev->private = (void*)1;
- mddev->pers = NULL;
- /* tell userspace to handle 'inactive' */
- sysfs_notify_dirent(mddev->sysfs_state);
+ /* tell userspace to handle 'inactive' */
+ sysfs_notify_dirent(mddev->sysfs_state);
- list_for_each_entry(rdev, &mddev->disks, same_set)
- if (rdev->raid_disk >= 0) {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
- }
+ list_for_each_entry(rdev, &mddev->disks, same_set)
+ if (rdev->raid_disk >= 0) {
+ char nm[20];
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_remove_link(&mddev->kobj, nm);
+ }
- set_capacity(disk, 0);
- mddev->changed = 1;
+ set_capacity(disk, 0);
+ revalidate_disk(disk);
- if (mddev->ro)
- mddev->ro = 0;
- }
- if (!mddev->in_sync || mddev->flags) {
- /* mark array as shutdown cleanly */
- mddev->in_sync = 1;
- md_update_sb(mddev, 1);
- }
- if (mode == 1)
- set_disk_ro(disk, 1);
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ if (mddev->ro)
+ mddev->ro = 0;
+
err = 0;
}
-out:
mutex_unlock(&mddev->open_mutex);
if (err)
return err;
@@ -4586,52 +4741,12 @@ out:
export_array(mddev);
- mddev->array_sectors = 0;
- mddev->external_size = 0;
- mddev->dev_sectors = 0;
- mddev->raid_disks = 0;
- mddev->recovery_cp = 0;
- mddev->resync_min = 0;
- mddev->resync_max = MaxSector;
- mddev->reshape_position = MaxSector;
- mddev->external = 0;
- mddev->persistent = 0;
- mddev->level = LEVEL_NONE;
- mddev->clevel[0] = 0;
- mddev->flags = 0;
- mddev->ro = 0;
- mddev->metadata_type[0] = 0;
- mddev->chunk_sectors = 0;
- mddev->ctime = mddev->utime = 0;
- mddev->layout = 0;
- mddev->max_disks = 0;
- mddev->events = 0;
- mddev->delta_disks = 0;
- mddev->new_level = LEVEL_NONE;
- mddev->new_layout = 0;
- mddev->new_chunk_sectors = 0;
- mddev->curr_resync = 0;
- mddev->resync_mismatches = 0;
- mddev->suspend_lo = mddev->suspend_hi = 0;
- mddev->sync_speed_min = mddev->sync_speed_max = 0;
- mddev->recovery = 0;
- mddev->in_sync = 0;
- mddev->changed = 0;
- mddev->degraded = 0;
- mddev->barriers_work = 0;
- mddev->safemode = 0;
- mddev->bitmap_info.offset = 0;
- mddev->bitmap_info.default_offset = 0;
- mddev->bitmap_info.chunksize = 0;
- mddev->bitmap_info.daemon_sleep = 0;
- mddev->bitmap_info.max_write_behind = 0;
+ md_clean(mddev);
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
- } else if (mddev->pers)
- printk(KERN_INFO "md: %s switched to read-only mode.\n",
- mdname(mddev));
+ }
err = 0;
blk_integrity_unregister(disk);
md_new_event(mddev);
@@ -5349,7 +5464,7 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks)
if (mddev->pers->check_reshape == NULL)
return -EINVAL;
if (raid_disks <= 0 ||
- raid_disks >= mddev->max_disks)
+ (mddev->max_disks && raid_disks >= mddev->max_disks))
return -EINVAL;
if (mddev->sync_thread || mddev->reshape_position != MaxSector)
return -EBUSY;
@@ -5486,7 +5601,7 @@ static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
geo->heads = 2;
geo->sectors = 4;
- geo->cylinders = get_capacity(mddev->gendisk) / 8;
+ geo->cylinders = mddev->array_sectors / 8;
return 0;
}
@@ -5496,6 +5611,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
int err = 0;
void __user *argp = (void __user *)arg;
mddev_t *mddev = NULL;
+ int ro;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -5628,9 +5744,37 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
goto done_unlock;
case STOP_ARRAY_RO:
- err = do_md_stop(mddev, 1, 1);
+ err = md_set_readonly(mddev, 1);
goto done_unlock;
+ case BLKROSET:
+ if (get_user(ro, (int __user *)(arg))) {
+ err = -EFAULT;
+ goto done_unlock;
+ }
+ err = -EINVAL;
+
+ /* if the bdev is going readonly the value of mddev->ro
+ * does not matter, no writes are coming
+ */
+ if (ro)
+ goto done_unlock;
+
+ /* are we already prepared for writes? */
+ if (mddev->ro != 1)
+ goto done_unlock;
+
+ /* transitioning to readauto need only happen for
+ * arrays that call md_write_start
+ */
+ if (mddev->pers) {
+ err = restart_array(mddev);
+ if (err == 0) {
+ mddev->ro = 2;
+ set_disk_ro(mddev->gendisk, 0);
+ }
+ }
+ goto done_unlock;
}
/*
@@ -5751,7 +5895,6 @@ static int md_open(struct block_device *bdev, fmode_t mode)
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
- check_disk_change(bdev);
out:
return err;
}
@@ -5766,21 +5909,6 @@ static int md_release(struct gendisk *disk, fmode_t mode)
return 0;
}
-
-static int md_media_changed(struct gendisk *disk)
-{
- mddev_t *mddev = disk->private_data;
-
- return mddev->changed;
-}
-
-static int md_revalidate(struct gendisk *disk)
-{
- mddev_t *mddev = disk->private_data;
-
- mddev->changed = 0;
- return 0;
-}
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
@@ -5791,8 +5919,6 @@ static const struct block_device_operations md_fops =
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
- .media_changed = md_media_changed,
- .revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
@@ -5906,7 +6032,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->pers->error_handler(mddev,rdev);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
- set_bit(StateChanged, &rdev->flags);
+ sysfs_notify_dirent(rdev->sysfs_state);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -6898,11 +7024,6 @@ void md_check_recovery(mddev_t *mddev)
if (mddev->flags)
md_update_sb(mddev, 0);
- list_for_each_entry(rdev, &mddev->disks, same_set)
- if (test_and_clear_bit(StateChanged, &rdev->flags))
- sysfs_notify_dirent(rdev->sysfs_state);
-
-
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
/* resync/recovery still happening */
@@ -7039,7 +7160,7 @@ static int md_notify_reboot(struct notifier_block *this,
* appears to still be in use. Hence
* the '100'.
*/
- do_md_stop(mddev, 1, 100);
+ md_set_readonly(mddev, 100);
mddev_unlock(mddev);
}
/*
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 8e4c75c00d46..7ab5ea155452 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -74,9 +74,6 @@ struct mdk_rdev_s
#define Blocked 8 /* An error occured on an externally
* managed array, don't allow writes
* until it is cleared */
-#define StateChanged 9 /* Faulty or Blocked has changed during
- * interrupt, so it needs to be
- * notified by the thread */
wait_queue_head_t blocked_wait;
int desc_nr; /* descriptor index in the superblock */
@@ -153,6 +150,12 @@ struct mddev_s
int external_size; /* size managed
* externally */
__u64 events;
+ /* If the last 'event' was simply a clean->dirty transition, and
+ * we didn't write it to the spares, then it is safe and simple
+ * to just decrement the event count on a dirty->clean transition.
+ * So we record that possibility here.
+ */
+ int can_decrease_events;
char uuid[16];
@@ -240,7 +243,6 @@ struct mddev_s
atomic_t active; /* general refcount */
atomic_t openers; /* number of active opens */
- int changed; /* true if we might need to reread partition info */
int degraded; /* whether md should consider
* adding a spare
*/
@@ -279,9 +281,6 @@ struct mddev_s
atomic_t writes_pending;
struct request_queue *queue; /* for plugging ... */
- atomic_t write_behind; /* outstanding async IO */
- unsigned int max_write_behind; /* 0 = sync */
-
struct bitmap *bitmap; /* the bitmap for the device */
struct {
struct file *file; /* the bitmap file */
@@ -305,6 +304,7 @@ struct mddev_s
atomic_t max_corr_read_errors; /* max read retries */
struct list_head all_mddevs;
+ struct attribute_group *to_remove;
/* Generic barrier handling.
* If there is a pending barrier request, all other
* writes are blocked while the devices are flushed.
@@ -336,7 +336,7 @@ struct mdk_personality
int level;
struct list_head list;
struct module *owner;
- int (*make_request)(struct request_queue *q, struct bio *bio);
+ int (*make_request)(mddev_t *mddev, struct bio *bio);
int (*run)(mddev_t *mddev);
int (*stop)(mddev_t *mddev);
void (*status)(struct seq_file *seq, mddev_t *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 789bf535d29c..410fb60699ac 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -85,7 +85,7 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
static void multipath_end_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
+ struct multipath_bh *mp_bh = bio->bi_private;
multipath_conf_t *conf = mp_bh->mddev->private;
mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
@@ -136,14 +136,11 @@ static void multipath_unplug(struct request_queue *q)
}
-static int multipath_make_request (struct request_queue *q, struct bio * bio)
+static int multipath_make_request(mddev_t *mddev, struct bio * bio)
{
- mddev_t *mddev = q->queuedata;
multipath_conf_t *conf = mddev->private;
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
- const int rw = bio_data_dir(bio);
- int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
md_barrier_request(mddev, bio);
@@ -155,12 +152,6 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
mp_bh->master_bio = bio;
mp_bh->mddev = mddev;
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
mp_bh->path = multipath_map(conf);
if (mp_bh->path < 0) {
bio_endio(bio, -EIO);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c3bec024612e..e70f004c99e8 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -23,15 +23,17 @@
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
+#include "raid5.h"
static void raid0_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
raid0_conf_t *conf = mddev->private;
mdk_rdev_t **devlist = conf->devlist;
+ int raid_disks = conf->strip_zone[0].nb_dev;
int i;
- for (i=0; i<mddev->raid_disks; i++) {
+ for (i=0; i < raid_disks; i++) {
struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
blk_unplug(r_queue);
@@ -43,12 +45,13 @@ static int raid0_congested(void *data, int bits)
mddev_t *mddev = data;
raid0_conf_t *conf = mddev->private;
mdk_rdev_t **devlist = conf->devlist;
+ int raid_disks = conf->strip_zone[0].nb_dev;
int i, ret = 0;
if (mddev_congested(mddev, bits))
return 1;
- for (i = 0; i < mddev->raid_disks && !ret ; i++) {
+ for (i = 0; i < raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
@@ -66,16 +69,17 @@ static void dump_zones(mddev_t *mddev)
sector_t zone_start = 0;
char b[BDEVNAME_SIZE];
raid0_conf_t *conf = mddev->private;
+ int raid_disks = conf->strip_zone[0].nb_dev;
printk(KERN_INFO "******* %s configuration *********\n",
mdname(mddev));
h = 0;
for (j = 0; j < conf->nr_strip_zones; j++) {
printk(KERN_INFO "zone%d=[", j);
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
- printk("%s/",
- bdevname(conf->devlist[j*mddev->raid_disks
+ printk(KERN_CONT "%s/",
+ bdevname(conf->devlist[j*raid_disks
+ k]->bdev, b));
- printk("]\n");
+ printk(KERN_CONT "]\n");
zone_size = conf->strip_zone[j].zone_end - zone_start;
printk(KERN_INFO " zone offset=%llukb "
@@ -88,7 +92,7 @@ static void dump_zones(mddev_t *mddev)
printk(KERN_INFO "**********************************\n\n");
}
-static int create_strip_zones(mddev_t *mddev)
+static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
{
int i, c, err;
sector_t curr_zone_end, sectors;
@@ -101,8 +105,9 @@ static int create_strip_zones(mddev_t *mddev)
if (!conf)
return -ENOMEM;
list_for_each_entry(rdev1, &mddev->disks, same_set) {
- printk(KERN_INFO "raid0: looking at %s\n",
- bdevname(rdev1->bdev,b));
+ printk(KERN_INFO "md/raid0:%s: looking at %s\n",
+ mdname(mddev),
+ bdevname(rdev1->bdev, b));
c = 0;
/* round size to chunk_size */
@@ -111,14 +116,16 @@ static int create_strip_zones(mddev_t *mddev)
rdev1->sectors = sectors * mddev->chunk_sectors;
list_for_each_entry(rdev2, &mddev->disks, same_set) {
- printk(KERN_INFO "raid0: comparing %s(%llu)",
+ printk(KERN_INFO "md/raid0:%s: comparing %s(%llu)",
+ mdname(mddev),
bdevname(rdev1->bdev,b),
(unsigned long long)rdev1->sectors);
- printk(KERN_INFO " with %s(%llu)\n",
+ printk(KERN_CONT " with %s(%llu)\n",
bdevname(rdev2->bdev,b),
(unsigned long long)rdev2->sectors);
if (rdev2 == rdev1) {
- printk(KERN_INFO "raid0: END\n");
+ printk(KERN_INFO "md/raid0:%s: END\n",
+ mdname(mddev));
break;
}
if (rdev2->sectors == rdev1->sectors) {
@@ -126,20 +133,24 @@ static int create_strip_zones(mddev_t *mddev)
* Not unique, don't count it as a new
* group
*/
- printk(KERN_INFO "raid0: EQUAL\n");
+ printk(KERN_INFO "md/raid0:%s: EQUAL\n",
+ mdname(mddev));
c = 1;
break;
}
- printk(KERN_INFO "raid0: NOT EQUAL\n");
+ printk(KERN_INFO "md/raid0:%s: NOT EQUAL\n",
+ mdname(mddev));
}
if (!c) {
- printk(KERN_INFO "raid0: ==> UNIQUE\n");
+ printk(KERN_INFO "md/raid0:%s: ==> UNIQUE\n",
+ mdname(mddev));
conf->nr_strip_zones++;
- printk(KERN_INFO "raid0: %d zones\n",
- conf->nr_strip_zones);
+ printk(KERN_INFO "md/raid0:%s: %d zones\n",
+ mdname(mddev), conf->nr_strip_zones);
}
}
- printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
+ printk(KERN_INFO "md/raid0:%s: FINAL %d zones\n",
+ mdname(mddev), conf->nr_strip_zones);
err = -ENOMEM;
conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
conf->nr_strip_zones, GFP_KERNEL);
@@ -162,14 +173,18 @@ static int create_strip_zones(mddev_t *mddev)
list_for_each_entry(rdev1, &mddev->disks, same_set) {
int j = rdev1->raid_disk;
+ if (mddev->level == 10)
+ /* taking over a raid10-n2 array */
+ j /= 2;
+
if (j < 0 || j >= mddev->raid_disks) {
- printk(KERN_ERR "raid0: bad disk number %d - "
- "aborting!\n", j);
+ printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
+ "aborting!\n", mdname(mddev), j);
goto abort;
}
if (dev[j]) {
- printk(KERN_ERR "raid0: multiple devices for %d - "
- "aborting!\n", j);
+ printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
+ "aborting!\n", mdname(mddev), j);
goto abort;
}
dev[j] = rdev1;
@@ -191,8 +206,8 @@ static int create_strip_zones(mddev_t *mddev)
cnt++;
}
if (cnt != mddev->raid_disks) {
- printk(KERN_ERR "raid0: too few disks (%d of %d) - "
- "aborting!\n", cnt, mddev->raid_disks);
+ printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
+ "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
goto abort;
}
zone->nb_dev = cnt;
@@ -208,39 +223,44 @@ static int create_strip_zones(mddev_t *mddev)
zone = conf->strip_zone + i;
dev = conf->devlist + i * mddev->raid_disks;
- printk(KERN_INFO "raid0: zone %d\n", i);
+ printk(KERN_INFO "md/raid0:%s: zone %d\n",
+ mdname(mddev), i);
zone->dev_start = smallest->sectors;
smallest = NULL;
c = 0;
for (j=0; j<cnt; j++) {
rdev = conf->devlist[j];
- printk(KERN_INFO "raid0: checking %s ...",
- bdevname(rdev->bdev, b));
+ printk(KERN_INFO "md/raid0:%s: checking %s ...",
+ mdname(mddev),
+ bdevname(rdev->bdev, b));
if (rdev->sectors <= zone->dev_start) {
- printk(KERN_INFO " nope.\n");
+ printk(KERN_CONT " nope.\n");
continue;
}
- printk(KERN_INFO " contained as device %d\n", c);
+ printk(KERN_CONT " contained as device %d\n", c);
dev[c] = rdev;
c++;
if (!smallest || rdev->sectors < smallest->sectors) {
smallest = rdev;
- printk(KERN_INFO " (%llu) is smallest!.\n",
- (unsigned long long)rdev->sectors);
+ printk(KERN_INFO "md/raid0:%s: (%llu) is smallest!.\n",
+ mdname(mddev),
+ (unsigned long long)rdev->sectors);
}
}
zone->nb_dev = c;
sectors = (smallest->sectors - zone->dev_start) * c;
- printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
- zone->nb_dev, (unsigned long long)sectors);
+ printk(KERN_INFO "md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
+ mdname(mddev),
+ zone->nb_dev, (unsigned long long)sectors);
curr_zone_end += sectors;
zone->zone_end = curr_zone_end;
- printk(KERN_INFO "raid0: current zone start: %llu\n",
- (unsigned long long)smallest->sectors);
+ printk(KERN_INFO "md/raid0:%s: current zone start: %llu\n",
+ mdname(mddev),
+ (unsigned long long)smallest->sectors);
}
mddev->queue->unplug_fn = raid0_unplug;
mddev->queue->backing_dev_info.congested_fn = raid0_congested;
@@ -251,7 +271,7 @@ static int create_strip_zones(mddev_t *mddev)
* chunk size is a multiple of that sector size
*/
if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
- printk(KERN_ERR "%s chunk_size of %d not valid\n",
+ printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
mdname(mddev),
mddev->chunk_sectors << 9);
goto abort;
@@ -261,14 +281,15 @@ static int create_strip_zones(mddev_t *mddev)
blk_queue_io_opt(mddev->queue,
(mddev->chunk_sectors << 9) * mddev->raid_disks);
- printk(KERN_INFO "raid0: done.\n");
- mddev->private = conf;
+ printk(KERN_INFO "md/raid0:%s: done.\n", mdname(mddev));
+ *private_conf = conf;
+
return 0;
abort:
kfree(conf->strip_zone);
kfree(conf->devlist);
kfree(conf);
- mddev->private = NULL;
+ *private_conf = NULL;
return err;
}
@@ -319,10 +340,12 @@ static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
static int raid0_run(mddev_t *mddev)
{
+ raid0_conf_t *conf;
int ret;
if (mddev->chunk_sectors == 0) {
- printk(KERN_ERR "md/raid0: chunk size must be set.\n");
+ printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
+ mdname(mddev));
return -EINVAL;
}
if (md_check_no_bitmap(mddev))
@@ -330,15 +353,27 @@ static int raid0_run(mddev_t *mddev)
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
mddev->queue->queue_lock = &mddev->queue->__queue_lock;
- ret = create_strip_zones(mddev);
- if (ret < 0)
- return ret;
+ /* if private is not null, we are here after takeover */
+ if (mddev->private == NULL) {
+ ret = create_strip_zones(mddev, &conf);
+ if (ret < 0)
+ return ret;
+ mddev->private = conf;
+ }
+ conf = mddev->private;
+ if (conf->scale_raid_disks) {
+ int i;
+ for (i=0; i < conf->strip_zone[0].nb_dev; i++)
+ conf->devlist[i]->raid_disk /= conf->scale_raid_disks;
+ /* FIXME update sysfs rd links */
+ }
/* calculate array device size */
md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
- printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
- (unsigned long long)mddev->array_sectors);
+ printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
+ mdname(mddev),
+ (unsigned long long)mddev->array_sectors);
/* calculate the max read-ahead size.
* For read-ahead of large files to be effective, we need to
* readahead at least twice a whole stripe. i.e. number of devices
@@ -402,6 +437,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
unsigned int sect_in_chunk;
sector_t chunk;
raid0_conf_t *conf = mddev->private;
+ int raid_disks = conf->strip_zone[0].nb_dev;
unsigned int chunk_sects = mddev->chunk_sectors;
if (is_power_of_2(chunk_sects)) {
@@ -424,7 +460,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
* + the position in the chunk
*/
*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
- return conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks
+ return conf->devlist[(zone - conf->strip_zone)*raid_disks
+ sector_div(sector, zone->nb_dev)];
}
@@ -444,27 +480,18 @@ static inline int is_io_in_chunk_boundary(mddev_t *mddev,
}
}
-static int raid0_make_request(struct request_queue *q, struct bio *bio)
+static int raid0_make_request(mddev_t *mddev, struct bio *bio)
{
- mddev_t *mddev = q->queuedata;
unsigned int chunk_sects;
sector_t sector_offset;
struct strip_zone *zone;
mdk_rdev_t *tmp_dev;
- const int rw = bio_data_dir(bio);
- int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
md_barrier_request(mddev, bio);
return 0;
}
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
chunk_sects = mddev->chunk_sectors;
if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
sector_t sector = bio->bi_sector;
@@ -482,9 +509,9 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
else
bp = bio_split(bio, chunk_sects -
sector_div(sector, chunk_sects));
- if (raid0_make_request(q, &bp->bio1))
+ if (raid0_make_request(mddev, &bp->bio1))
generic_make_request(&bp->bio1);
- if (raid0_make_request(q, &bp->bio2))
+ if (raid0_make_request(mddev, &bp->bio2))
generic_make_request(&bp->bio2);
bio_pair_release(bp);
@@ -504,9 +531,10 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
return 1;
bad_map:
- printk("raid0_make_request bug: can't convert block across chunks"
- " or bigger than %dk %llu %d\n", chunk_sects / 2,
- (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+ printk("md/raid0:%s: make_request bug: can't convert block across chunks"
+ " or bigger than %dk %llu %d\n",
+ mdname(mddev), chunk_sects / 2,
+ (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
bio_io_error(bio);
return 0;
@@ -519,6 +547,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
int j, k, h;
char b[BDEVNAME_SIZE];
raid0_conf_t *conf = mddev->private;
+ int raid_disks = conf->strip_zone[0].nb_dev;
sector_t zone_size;
sector_t zone_start = 0;
@@ -529,7 +558,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
seq_printf(seq, "=[");
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
seq_printf(seq, "%s/", bdevname(
- conf->devlist[j*mddev->raid_disks + k]
+ conf->devlist[j*raid_disks + k]
->bdev, b));
zone_size = conf->strip_zone[j].zone_end - zone_start;
@@ -544,6 +573,104 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
return;
}
+static void *raid0_takeover_raid5(mddev_t *mddev)
+{
+ mdk_rdev_t *rdev;
+ raid0_conf_t *priv_conf;
+
+ if (mddev->degraded != 1) {
+ printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
+ mdname(mddev),
+ mddev->degraded);
+ return ERR_PTR(-EINVAL);
+ }
+
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ /* check slot number for a disk */
+ if (rdev->raid_disk == mddev->raid_disks-1) {
+ printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* Set new parameters */
+ mddev->new_level = 0;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->raid_disks--;
+ mddev->delta_disks = -1;
+ /* make sure it will not be marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ create_strip_zones(mddev, &priv_conf);
+ return priv_conf;
+}
+
+static void *raid0_takeover_raid10(mddev_t *mddev)
+{
+ raid0_conf_t *priv_conf;
+
+ /* Check layout:
+ * - far_copies must be 1
+ * - near_copies must be 2
+ * - number of disks must be even
+ * - all mirrors must be already degraded
+ */
+ if (mddev->layout != ((1 << 8) + 2)) {
+ printk(KERN_ERR "md/raid0:%s:: Raid0 cannot takover layout: 0x%x\n",
+ mdname(mddev),
+ mddev->layout);
+ return ERR_PTR(-EINVAL);
+ }
+ if (mddev->raid_disks & 1) {
+ printk(KERN_ERR "md/raid0:%s: Raid0 cannot takover Raid10 with odd disk number.\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+ if (mddev->degraded != (mddev->raid_disks>>1)) {
+ printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Set new parameters */
+ mddev->new_level = 0;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->delta_disks = - mddev->raid_disks / 2;
+ mddev->raid_disks += mddev->delta_disks;
+ mddev->degraded = 0;
+ /* make sure it will not be marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ create_strip_zones(mddev, &priv_conf);
+ priv_conf->scale_raid_disks = 2;
+ return priv_conf;
+}
+
+static void *raid0_takeover(mddev_t *mddev)
+{
+ /* raid0 can take over:
+ * raid5 - providing it is Raid4 layout and one disk is faulty
+ * raid10 - assuming we have all necessary active disks
+ */
+ if (mddev->level == 5) {
+ if (mddev->layout == ALGORITHM_PARITY_N)
+ return raid0_takeover_raid5(mddev);
+
+ printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
+ mdname(mddev), ALGORITHM_PARITY_N);
+ }
+
+ if (mddev->level == 10)
+ return raid0_takeover_raid10(mddev);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void raid0_quiesce(mddev_t *mddev, int state)
+{
+}
+
static struct mdk_personality raid0_personality=
{
.name = "raid0",
@@ -554,6 +681,8 @@ static struct mdk_personality raid0_personality=
.stop = raid0_stop,
.status = raid0_status,
.size = raid0_size,
+ .takeover = raid0_takeover,
+ .quiesce = raid0_quiesce,
};
static int __init raid0_init (void)
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
index 91f8e876ee64..d724e664ca4d 100644
--- a/drivers/md/raid0.h
+++ b/drivers/md/raid0.h
@@ -13,6 +13,9 @@ struct raid0_private_data
struct strip_zone *strip_zone;
mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
int nr_strip_zones;
+ int scale_raid_disks; /* divide rdev->raid_disk by this in run()
+ * to handle conversion from raid10
+ */
};
typedef struct raid0_private_data raid0_conf_t;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e59b10e66edb..a948da8012de 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -263,7 +263,7 @@ static inline void update_head_pos(int disk, r1bio_t *r1_bio)
static void raid1_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
+ r1bio_t *r1_bio = bio->bi_private;
int mirror;
conf_t *conf = r1_bio->mddev->private;
@@ -297,7 +297,8 @@ static void raid1_end_read_request(struct bio *bio, int error)
*/
char b[BDEVNAME_SIZE];
if (printk_ratelimit())
- printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
+ printk(KERN_ERR "md/raid1:%s: %s: rescheduling sector %llu\n",
+ mdname(conf->mddev),
bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
reschedule_retry(r1_bio);
}
@@ -308,7 +309,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
static void raid1_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
+ r1bio_t *r1_bio = bio->bi_private;
int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
conf_t *conf = r1_bio->mddev->private;
struct bio *to_put = NULL;
@@ -418,7 +419,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
*/
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
- const unsigned long this_sector = r1_bio->sector;
+ const sector_t this_sector = r1_bio->sector;
int new_disk = conf->last_used, disk = new_disk;
int wonly_disk = -1;
const int sectors = r1_bio->sectors;
@@ -434,7 +435,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
retry:
if (conf->mddev->recovery_cp < MaxSector &&
(this_sector + sectors >= conf->next_resync)) {
- /* Choose the first operation device, for consistancy */
+ /* Choose the first operational device, for consistency */
new_disk = 0;
for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
@@ -774,9 +775,8 @@ do_sync_io:
return NULL;
}
-static int make_request(struct request_queue *q, struct bio * bio)
+static int make_request(mddev_t *mddev, struct bio * bio)
{
- mddev_t *mddev = q->queuedata;
conf_t *conf = mddev->private;
mirror_info_t *mirror;
r1bio_t *r1_bio;
@@ -788,7 +788,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
struct page **behind_pages = NULL;
const int rw = bio_data_dir(bio);
const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
- int cpu;
bool do_barriers;
mdk_rdev_t *blocked_rdev;
@@ -834,12 +833,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
bitmap = mddev->bitmap;
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
/*
* make_request() can abort the operation when READA is being
* used and no empty request is available.
@@ -866,6 +859,15 @@ static int make_request(struct request_queue *q, struct bio * bio)
}
mirror = conf->mirrors + rdisk;
+ if (test_bit(WriteMostly, &mirror->rdev->flags) &&
+ bitmap) {
+ /* Reading from a write-mostly device must
+ * take care not to over-take any writes
+ * that are 'behind'
+ */
+ wait_event(bitmap->behind_wait,
+ atomic_read(&bitmap->behind_writes) == 0);
+ }
r1_bio->read_disk = rdisk;
read_bio = bio_clone(bio, GFP_NOIO);
@@ -912,9 +914,10 @@ static int make_request(struct request_queue *q, struct bio * bio)
if (test_bit(Faulty, &rdev->flags)) {
rdev_dec_pending(rdev, mddev);
r1_bio->bios[i] = NULL;
- } else
+ } else {
r1_bio->bios[i] = bio;
- targets++;
+ targets++;
+ }
} else
r1_bio->bios[i] = NULL;
}
@@ -942,10 +945,14 @@ static int make_request(struct request_queue *q, struct bio * bio)
set_bit(R1BIO_Degraded, &r1_bio->state);
}
- /* do behind I/O ? */
+ /* do behind I/O ?
+ * Not if there are too many, or cannot allocate memory,
+ * or a reader on WriteMostly is waiting for behind writes
+ * to flush */
if (bitmap &&
(atomic_read(&bitmap->behind_writes)
< mddev->bitmap_info.max_write_behind) &&
+ !waitqueue_active(&bitmap->behind_wait) &&
(behind_pages = alloc_behind_pages(bio)) != NULL)
set_bit(R1BIO_BehindIO, &r1_bio->state);
@@ -1070,21 +1077,22 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
} else
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- printk(KERN_ALERT "raid1: Disk failure on %s, disabling device.\n"
- "raid1: Operation continuing on %d devices.\n",
- bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
+ printk(KERN_ALERT "md/raid1:%s: Disk failure on %s, disabling device.\n"
+ KERN_ALERT "md/raid1:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), conf->raid_disks - mddev->degraded);
}
static void print_conf(conf_t *conf)
{
int i;
- printk("RAID1 conf printout:\n");
+ printk(KERN_DEBUG "RAID1 conf printout:\n");
if (!conf) {
- printk("(!conf)\n");
+ printk(KERN_DEBUG "(!conf)\n");
return;
}
- printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+ printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
conf->raid_disks);
rcu_read_lock();
@@ -1092,7 +1100,7 @@ static void print_conf(conf_t *conf)
char b[BDEVNAME_SIZE];
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev)
- printk(" disk %d, wo:%d, o:%d, dev:%s\n",
+ printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
i, !test_bit(In_sync, &rdev->flags),
!test_bit(Faulty, &rdev->flags),
bdevname(rdev->bdev,b));
@@ -1223,7 +1231,7 @@ abort:
static void end_sync_read(struct bio *bio, int error)
{
- r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
+ r1bio_t *r1_bio = bio->bi_private;
int i;
for (i=r1_bio->mddev->raid_disks; i--; )
@@ -1246,7 +1254,7 @@ static void end_sync_read(struct bio *bio, int error)
static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
+ r1bio_t *r1_bio = bio->bi_private;
mddev_t *mddev = r1_bio->mddev;
conf_t *conf = mddev->private;
int i;
@@ -1453,9 +1461,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
char b[BDEVNAME_SIZE];
/* Cannot read from anywhere, array is toast */
md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
- printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
+ printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
" for block %llu\n",
- bdevname(bio->bi_bdev,b),
+ mdname(mddev),
+ bdevname(bio->bi_bdev, b),
(unsigned long long)r1_bio->sector);
md_done_sync(mddev, r1_bio->sectors, 0);
put_buf(r1_bio);
@@ -1577,7 +1586,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
else {
atomic_add(s, &rdev->corrected_errors);
printk(KERN_INFO
- "raid1:%s: read error corrected "
+ "md/raid1:%s: read error corrected "
"(%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(sect +
@@ -1682,8 +1691,9 @@ static void raid1d(mddev_t *mddev)
bio = r1_bio->bios[r1_bio->read_disk];
if ((disk=read_balance(conf, r1_bio)) == -1) {
- printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
+ printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
" read error for block %llu\n",
+ mdname(mddev),
bdevname(bio->bi_bdev,b),
(unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio);
@@ -1697,10 +1707,11 @@ static void raid1d(mddev_t *mddev)
r1_bio->bios[r1_bio->read_disk] = bio;
rdev = conf->mirrors[disk].rdev;
if (printk_ratelimit())
- printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
- " another mirror\n",
- bdevname(rdev->bdev,b),
- (unsigned long long)r1_bio->sector);
+ printk(KERN_ERR "md/raid1:%s: redirecting sector %llu to"
+ " other mirror: %s\n",
+ mdname(mddev),
+ (unsigned long long)r1_bio->sector,
+ bdevname(rdev->bdev,b));
bio->bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
@@ -1755,13 +1766,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
int still_degraded = 0;
if (!conf->r1buf_pool)
- {
-/*
- printk("sync start - bitmap %p\n", mddev->bitmap);
-*/
if (init_resync(conf))
return 0;
- }
max_sector = mddev->dev_sectors;
if (sector_nr >= max_sector) {
@@ -2042,7 +2048,7 @@ static conf_t *setup_conf(mddev_t *mddev)
err = -EIO;
if (conf->last_used < 0) {
- printk(KERN_ERR "raid1: no operational mirrors for %s\n",
+ printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
mdname(mddev));
goto abort;
}
@@ -2050,7 +2056,7 @@ static conf_t *setup_conf(mddev_t *mddev)
conf->thread = md_register_thread(raid1d, mddev, NULL);
if (!conf->thread) {
printk(KERN_ERR
- "raid1: couldn't allocate thread for %s\n",
+ "md/raid1:%s: couldn't allocate thread\n",
mdname(mddev));
goto abort;
}
@@ -2076,12 +2082,12 @@ static int run(mddev_t *mddev)
mdk_rdev_t *rdev;
if (mddev->level != 1) {
- printk("raid1: %s: raid level not set to mirroring (%d)\n",
+ printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
mdname(mddev), mddev->level);
return -EIO;
}
if (mddev->reshape_position != MaxSector) {
- printk("raid1: %s: reshape_position set but not supported\n",
+ printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
mdname(mddev));
return -EIO;
}
@@ -2124,11 +2130,11 @@ static int run(mddev_t *mddev)
mddev->recovery_cp = MaxSector;
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "raid1: %s is not clean"
+ printk(KERN_NOTICE "md/raid1:%s: not clean"
" -- starting background reconstruction\n",
mdname(mddev));
printk(KERN_INFO
- "raid1: raid set %s active with %d out of %d mirrors\n",
+ "md/raid1:%s: active with %d out of %d mirrors\n",
mdname(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks);
@@ -2152,15 +2158,14 @@ static int stop(mddev_t *mddev)
{
conf_t *conf = mddev->private;
struct bitmap *bitmap = mddev->bitmap;
- int behind_wait = 0;
/* wait for behind writes to complete */
- while (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
- behind_wait++;
- printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n", mdname(mddev), behind_wait);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ); /* wait a second */
+ if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
+ printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
+ mdname(mddev));
/* need to kick something here to make sure I/O goes? */
+ wait_event(bitmap->behind_wait,
+ atomic_read(&bitmap->behind_writes) == 0);
}
raise_barrier(conf);
@@ -2191,7 +2196,6 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors)
if (mddev->array_sectors > raid1_size(mddev, sectors, 0))
return -EINVAL;
set_capacity(mddev->gendisk, mddev->array_sectors);
- mddev->changed = 1;
revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors &&
mddev->recovery_cp == MaxSector) {
@@ -2286,9 +2290,9 @@ static int raid1_reshape(mddev_t *mddev)
if (sysfs_create_link(&mddev->kobj,
&rdev->kobj, nm))
printk(KERN_WARNING
- "md/raid1: cannot register "
- "%s for %s\n",
- nm, mdname(mddev));
+ "md/raid1:%s: cannot register "
+ "%s\n",
+ mdname(mddev), nm);
}
if (rdev)
newmirrors[d2++].rdev = rdev;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e2766d8251a1..03724992cdf2 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -24,6 +24,7 @@
#include <linux/seq_file.h>
#include "md.h"
#include "raid10.h"
+#include "raid0.h"
#include "bitmap.h"
/*
@@ -255,7 +256,7 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio)
static void raid10_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
+ r10bio_t *r10_bio = bio->bi_private;
int slot, dev;
conf_t *conf = r10_bio->mddev->private;
@@ -285,7 +286,8 @@ static void raid10_end_read_request(struct bio *bio, int error)
*/
char b[BDEVNAME_SIZE];
if (printk_ratelimit())
- printk(KERN_ERR "raid10: %s: rescheduling sector %llu\n",
+ printk(KERN_ERR "md/raid10:%s: %s: rescheduling sector %llu\n",
+ mdname(conf->mddev),
bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector);
reschedule_retry(r10_bio);
}
@@ -296,7 +298,7 @@ static void raid10_end_read_request(struct bio *bio, int error)
static void raid10_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
+ r10bio_t *r10_bio = bio->bi_private;
int slot, dev;
conf_t *conf = r10_bio->mddev->private;
@@ -494,7 +496,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
*/
static int read_balance(conf_t *conf, r10bio_t *r10_bio)
{
- const unsigned long this_sector = r10_bio->sector;
+ const sector_t this_sector = r10_bio->sector;
int disk, slot, nslot;
const int sectors = r10_bio->sectors;
sector_t new_distance, current_distance;
@@ -601,7 +603,7 @@ static void unplug_slaves(mddev_t *mddev)
int i;
rcu_read_lock();
- for (i=0; i<mddev->raid_disks; i++) {
+ for (i=0; i < conf->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
@@ -635,7 +637,7 @@ static int raid10_congested(void *data, int bits)
if (mddev_congested(mddev, bits))
return 1;
rcu_read_lock();
- for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
+ for (i = 0; i < conf->raid_disks && ret == 0; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -788,14 +790,12 @@ static void unfreeze_array(conf_t *conf)
spin_unlock_irq(&conf->resync_lock);
}
-static int make_request(struct request_queue *q, struct bio * bio)
+static int make_request(mddev_t *mddev, struct bio * bio)
{
- mddev_t *mddev = q->queuedata;
conf_t *conf = mddev->private;
mirror_info_t *mirror;
r10bio_t *r10_bio;
struct bio *read_bio;
- int cpu;
int i;
int chunk_sects = conf->chunk_mask + 1;
const int rw = bio_data_dir(bio);
@@ -825,16 +825,16 @@ static int make_request(struct request_queue *q, struct bio * bio)
*/
bp = bio_split(bio,
chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
- if (make_request(q, &bp->bio1))
+ if (make_request(mddev, &bp->bio1))
generic_make_request(&bp->bio1);
- if (make_request(q, &bp->bio2))
+ if (make_request(mddev, &bp->bio2))
generic_make_request(&bp->bio2);
bio_pair_release(bp);
return 0;
bad_map:
- printk("raid10_make_request bug: can't convert block across chunks"
- " or bigger than %dk %llu %d\n", chunk_sects/2,
+ printk("md/raid10:%s: make_request bug: can't convert block across chunks"
+ " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
(unsigned long long)bio->bi_sector, bio->bi_size >> 10);
bio_io_error(bio);
@@ -850,12 +850,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
*/
wait_barrier(conf);
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
r10_bio->master_bio = bio;
@@ -1039,9 +1033,10 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
}
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- printk(KERN_ALERT "raid10: Disk failure on %s, disabling device.\n"
- "raid10: Operation continuing on %d devices.\n",
- bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
+ printk(KERN_ALERT "md/raid10:%s: Disk failure on %s, disabling device.\n"
+ KERN_ALERT "md/raid10:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), conf->raid_disks - mddev->degraded);
}
static void print_conf(conf_t *conf)
@@ -1049,19 +1044,19 @@ static void print_conf(conf_t *conf)
int i;
mirror_info_t *tmp;
- printk("RAID10 conf printout:\n");
+ printk(KERN_DEBUG "RAID10 conf printout:\n");
if (!conf) {
- printk("(!conf)\n");
+ printk(KERN_DEBUG "(!conf)\n");
return;
}
- printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+ printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
conf->raid_disks);
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
tmp = conf->mirrors + i;
if (tmp->rdev)
- printk(" disk %d, wo:%d, o:%d, dev:%s\n",
+ printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
i, !test_bit(In_sync, &tmp->rdev->flags),
!test_bit(Faulty, &tmp->rdev->flags),
bdevname(tmp->rdev->bdev,b));
@@ -1132,7 +1127,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
int mirror;
mirror_info_t *p;
int first = 0;
- int last = mddev->raid_disks - 1;
+ int last = conf->raid_disks - 1;
if (mddev->recovery_cp < MaxSector)
/* only hot-add to in-sync arrays, as recovery is
@@ -1224,7 +1219,7 @@ abort:
static void end_sync_read(struct bio *bio, int error)
{
- r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
+ r10bio_t *r10_bio = bio->bi_private;
conf_t *conf = r10_bio->mddev->private;
int i,d;
@@ -1261,7 +1256,7 @@ static void end_sync_read(struct bio *bio, int error)
static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
+ r10bio_t *r10_bio = bio->bi_private;
mddev_t *mddev = r10_bio->mddev;
conf_t *conf = mddev->private;
int i,d;
@@ -1510,13 +1505,14 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
if (cur_read_error_count > max_read_errors) {
rcu_read_unlock();
printk(KERN_NOTICE
- "raid10: %s: Raid device exceeded "
+ "md/raid10:%s: %s: Raid device exceeded "
"read_error threshold "
"[cur %d:max %d]\n",
+ mdname(mddev),
b, cur_read_error_count, max_read_errors);
printk(KERN_NOTICE
- "raid10: %s: Failing raid "
- "device\n", b);
+ "md/raid10:%s: %s: Failing raid "
+ "device\n", mdname(mddev), b);
md_error(mddev, conf->mirrors[d].rdev);
return;
}
@@ -1586,15 +1582,16 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
== 0) {
/* Well, this device is dead */
printk(KERN_NOTICE
- "raid10:%s: read correction "
+ "md/raid10:%s: read correction "
"write failed"
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(sect+
rdev->data_offset),
bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "raid10:%s: failing "
+ printk(KERN_NOTICE "md/raid10:%s: %s: failing "
"drive\n",
+ mdname(mddev),
bdevname(rdev->bdev, b));
md_error(mddev, rdev);
}
@@ -1622,20 +1619,21 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
READ) == 0) {
/* Well, this device is dead */
printk(KERN_NOTICE
- "raid10:%s: unable to read back "
+ "md/raid10:%s: unable to read back "
"corrected sectors"
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(sect+
rdev->data_offset),
bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "raid10:%s: failing drive\n",
+ printk(KERN_NOTICE "md/raid10:%s: %s: failing drive\n",
+ mdname(mddev),
bdevname(rdev->bdev, b));
md_error(mddev, rdev);
} else {
printk(KERN_INFO
- "raid10:%s: read error corrected"
+ "md/raid10:%s: read error corrected"
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(sect+
@@ -1710,8 +1708,9 @@ static void raid10d(mddev_t *mddev)
mddev->ro ? IO_BLOCKED : NULL;
mirror = read_balance(conf, r10_bio);
if (mirror == -1) {
- printk(KERN_ALERT "raid10: %s: unrecoverable I/O"
+ printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
" read error for block %llu\n",
+ mdname(mddev),
bdevname(bio->bi_bdev,b),
(unsigned long long)r10_bio->sector);
raid_end_bio_io(r10_bio);
@@ -1721,8 +1720,9 @@ static void raid10d(mddev_t *mddev)
bio_put(bio);
rdev = conf->mirrors[mirror].rdev;
if (printk_ratelimit())
- printk(KERN_ERR "raid10: %s: redirecting sector %llu to"
+ printk(KERN_ERR "md/raid10:%s: %s: redirecting sector %llu to"
" another mirror\n",
+ mdname(mddev),
bdevname(rdev->bdev,b),
(unsigned long long)r10_bio->sector);
bio = bio_clone(r10_bio->master_bio, GFP_NOIO);
@@ -1980,7 +1980,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
r10_bio = rb2;
if (!test_and_set_bit(MD_RECOVERY_INTR,
&mddev->recovery))
- printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n",
+ printk(KERN_INFO "md/raid10:%s: insufficient "
+ "working devices for recovery.\n",
mdname(mddev));
break;
}
@@ -2140,9 +2141,9 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
conf_t *conf = mddev->private;
if (!raid_disks)
- raid_disks = mddev->raid_disks;
+ raid_disks = conf->raid_disks;
if (!sectors)
- sectors = mddev->dev_sectors;
+ sectors = conf->dev_sectors;
size = sectors >> conf->chunk_shift;
sector_div(size, conf->far_copies);
@@ -2152,62 +2153,61 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
return size << conf->chunk_shift;
}
-static int run(mddev_t *mddev)
+
+static conf_t *setup_conf(mddev_t *mddev)
{
- conf_t *conf;
- int i, disk_idx, chunk_size;
- mirror_info_t *disk;
- mdk_rdev_t *rdev;
+ conf_t *conf = NULL;
int nc, fc, fo;
sector_t stride, size;
+ int err = -EINVAL;
if (mddev->chunk_sectors < (PAGE_SIZE >> 9) ||
!is_power_of_2(mddev->chunk_sectors)) {
- printk(KERN_ERR "md/raid10: chunk size must be "
- "at least PAGE_SIZE(%ld) and be a power of 2.\n", PAGE_SIZE);
- return -EINVAL;
+ printk(KERN_ERR "md/raid10:%s: chunk size must be "
+ "at least PAGE_SIZE(%ld) and be a power of 2.\n",
+ mdname(mddev), PAGE_SIZE);
+ goto out;
}
nc = mddev->layout & 255;
fc = (mddev->layout >> 8) & 255;
fo = mddev->layout & (1<<16);
+
if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
(mddev->layout >> 17)) {
- printk(KERN_ERR "raid10: %s: unsupported raid10 layout: 0x%8x\n",
+ printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
mdname(mddev), mddev->layout);
goto out;
}
- /*
- * copy the already verified devices into our private RAID10
- * bookkeeping area. [whatever we allocate in run(),
- * should be freed in stop()]
- */
+
+ err = -ENOMEM;
conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
- mddev->private = conf;
- if (!conf) {
- printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
- mdname(mddev));
+ if (!conf)
goto out;
- }
+
conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
- GFP_KERNEL);
- if (!conf->mirrors) {
- printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
- mdname(mddev));
- goto out_free_conf;
- }
+ GFP_KERNEL);
+ if (!conf->mirrors)
+ goto out;
conf->tmppage = alloc_page(GFP_KERNEL);
if (!conf->tmppage)
- goto out_free_conf;
+ goto out;
+
conf->raid_disks = mddev->raid_disks;
conf->near_copies = nc;
conf->far_copies = fc;
conf->copies = nc*fc;
conf->far_offset = fo;
- conf->chunk_mask = mddev->chunk_sectors - 1;
- conf->chunk_shift = ffz(~mddev->chunk_sectors);
+ conf->chunk_mask = mddev->new_chunk_sectors - 1;
+ conf->chunk_shift = ffz(~mddev->new_chunk_sectors);
+
+ conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
+ r10bio_pool_free, conf);
+ if (!conf->r10bio_pool)
+ goto out;
+
size = mddev->dev_sectors >> conf->chunk_shift;
sector_div(size, fc);
size = size * conf->raid_disks;
@@ -2221,7 +2221,8 @@ static int run(mddev_t *mddev)
*/
stride += conf->raid_disks - 1;
sector_div(stride, conf->raid_disks);
- mddev->dev_sectors = stride << conf->chunk_shift;
+
+ conf->dev_sectors = stride << conf->chunk_shift;
if (fo)
stride = 1;
@@ -2229,18 +2230,63 @@ static int run(mddev_t *mddev)
sector_div(stride, fc);
conf->stride = stride << conf->chunk_shift;
- conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
- r10bio_pool_free, conf);
- if (!conf->r10bio_pool) {
- printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
- mdname(mddev));
- goto out_free_conf;
- }
- conf->mddev = mddev;
spin_lock_init(&conf->device_lock);
+ INIT_LIST_HEAD(&conf->retry_list);
+
+ spin_lock_init(&conf->resync_lock);
+ init_waitqueue_head(&conf->wait_barrier);
+
+ conf->thread = md_register_thread(raid10d, mddev, NULL);
+ if (!conf->thread)
+ goto out;
+
+ conf->scale_disks = 0;
+ conf->mddev = mddev;
+ return conf;
+
+ out:
+ printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
+ mdname(mddev));
+ if (conf) {
+ if (conf->r10bio_pool)
+ mempool_destroy(conf->r10bio_pool);
+ kfree(conf->mirrors);
+ safe_put_page(conf->tmppage);
+ kfree(conf);
+ }
+ return ERR_PTR(err);
+}
+
+static int run(mddev_t *mddev)
+{
+ conf_t *conf;
+ int i, disk_idx, chunk_size;
+ mirror_info_t *disk;
+ mdk_rdev_t *rdev;
+ sector_t size;
+
+ /*
+ * copy the already verified devices into our private RAID10
+ * bookkeeping area. [whatever we allocate in run(),
+ * should be freed in stop()]
+ */
+
+ if (mddev->private == NULL) {
+ conf = setup_conf(mddev);
+ if (IS_ERR(conf))
+ return PTR_ERR(conf);
+ mddev->private = conf;
+ }
+ conf = mddev->private;
+ if (!conf)
+ goto out;
+
mddev->queue->queue_lock = &conf->device_lock;
+ mddev->thread = conf->thread;
+ conf->thread = NULL;
+
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
if (conf->raid_disks % conf->near_copies)
@@ -2251,9 +2297,14 @@ static int run(mddev_t *mddev)
list_for_each_entry(rdev, &mddev->disks, same_set) {
disk_idx = rdev->raid_disk;
- if (disk_idx >= mddev->raid_disks
+ if (disk_idx >= conf->raid_disks
|| disk_idx < 0)
continue;
+ if (conf->scale_disks) {
+ disk_idx *= conf->scale_disks;
+ rdev->raid_disk = disk_idx;
+ /* MOVE 'rd%d' link !! */
+ }
disk = conf->mirrors + disk_idx;
disk->rdev = rdev;
@@ -2271,14 +2322,9 @@ static int run(mddev_t *mddev)
disk->head_position = 0;
}
- INIT_LIST_HEAD(&conf->retry_list);
-
- spin_lock_init(&conf->resync_lock);
- init_waitqueue_head(&conf->wait_barrier);
-
/* need to check that every block has at least one working mirror */
if (!enough(conf)) {
- printk(KERN_ERR "raid10: not enough operational mirrors for %s\n",
+ printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
mdname(mddev));
goto out_free_conf;
}
@@ -2297,28 +2343,21 @@ static int run(mddev_t *mddev)
}
}
-
- mddev->thread = md_register_thread(raid10d, mddev, NULL);
- if (!mddev->thread) {
- printk(KERN_ERR
- "raid10: couldn't allocate thread for %s\n",
- mdname(mddev));
- goto out_free_conf;
- }
-
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "raid10: %s is not clean"
+ printk(KERN_NOTICE "md/raid10:%s: not clean"
" -- starting background reconstruction\n",
mdname(mddev));
printk(KERN_INFO
- "raid10: raid set %s active with %d out of %d devices\n",
- mdname(mddev), mddev->raid_disks - mddev->degraded,
- mddev->raid_disks);
+ "md/raid10:%s: active with %d out of %d devices\n",
+ mdname(mddev), conf->raid_disks - mddev->degraded,
+ conf->raid_disks);
/*
* Ok, everything is just fine now
*/
- md_set_array_sectors(mddev, raid10_size(mddev, 0, 0));
- mddev->resync_max_sectors = raid10_size(mddev, 0, 0);
+ mddev->dev_sectors = conf->dev_sectors;
+ size = raid10_size(mddev, 0, 0);
+ md_set_array_sectors(mddev, size);
+ mddev->resync_max_sectors = size;
mddev->queue->unplug_fn = raid10_unplug;
mddev->queue->backing_dev_info.congested_fn = raid10_congested;
@@ -2336,7 +2375,7 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.ra_pages = 2* stripe;
}
- if (conf->near_copies < mddev->raid_disks)
+ if (conf->near_copies < conf->raid_disks)
blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
md_integrity_register(mddev);
return 0;
@@ -2348,6 +2387,7 @@ out_free_conf:
kfree(conf->mirrors);
kfree(conf);
mddev->private = NULL;
+ md_unregister_thread(mddev->thread);
out:
return -EIO;
}
@@ -2384,6 +2424,61 @@ static void raid10_quiesce(mddev_t *mddev, int state)
}
}
+static void *raid10_takeover_raid0(mddev_t *mddev)
+{
+ mdk_rdev_t *rdev;
+ conf_t *conf;
+
+ if (mddev->degraded > 0) {
+ printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Update slot numbers to obtain
+ * degraded raid10 with missing mirrors
+ */
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ rdev->raid_disk *= 2;
+ }
+
+ /* Set new parameters */
+ mddev->new_level = 10;
+ /* new layout: far_copies = 1, near_copies = 2 */
+ mddev->new_layout = (1<<8) + 2;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->delta_disks = mddev->raid_disks;
+ mddev->degraded = mddev->raid_disks;
+ mddev->raid_disks *= 2;
+ /* make sure it will not be marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ conf = setup_conf(mddev);
+ conf->scale_disks = 2;
+ return conf;
+}
+
+static void *raid10_takeover(mddev_t *mddev)
+{
+ struct raid0_private_data *raid0_priv;
+
+ /* raid10 can take over:
+ * raid0 - providing it has only two drives
+ */
+ if (mddev->level == 0) {
+ /* for raid0 takeover only one zone is supported */
+ raid0_priv = mddev->private;
+ if (raid0_priv->nr_strip_zones > 1) {
+ printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
+ " with more than one zone.\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+ return raid10_takeover_raid0(mddev);
+ }
+ return ERR_PTR(-EINVAL);
+}
+
static struct mdk_personality raid10_personality =
{
.name = "raid10",
@@ -2400,6 +2495,7 @@ static struct mdk_personality raid10_personality =
.sync_request = sync_request,
.quiesce = raid10_quiesce,
.size = raid10_size,
+ .takeover = raid10_takeover,
};
static int __init raid_init(void)
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 59cd1efb8d30..3824a087e17c 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -33,9 +33,16 @@ struct r10_private_data_s {
* 1 stripe.
*/
+ sector_t dev_sectors; /* temp copy of mddev->dev_sectors */
+
int chunk_shift; /* shift from chunks to sectors */
sector_t chunk_mask;
+ int scale_disks; /* When starting array, multiply
+ * each ->raid_disk by this.
+ * Needed for raid0->raid10 migration
+ */
+
struct list_head retry_list;
/* queue pending writes and submit them on unplug */
struct bio_list pending_bio_list;
@@ -57,6 +64,11 @@ struct r10_private_data_s {
mempool_t *r10bio_pool;
mempool_t *r10buf_pool;
struct page *tmppage;
+
+ /* When taking over an array from a different personality, we store
+ * the new thread here until we fully activate the array.
+ */
+ struct mdk_thread_s *thread;
};
typedef struct r10_private_data_s conf_t;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 15348c393b5d..d2c0f94fa37d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -53,6 +53,7 @@
#include <linux/slab.h>
#include "md.h"
#include "raid5.h"
+#include "raid0.h"
#include "bitmap.h"
/*
@@ -1509,7 +1510,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
set_bit(R5_UPTODATE, &sh->dev[i].flags);
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
rdev = conf->disks[i].rdev;
- printk_rl(KERN_INFO "raid5:%s: read error corrected"
+ printk_rl(KERN_INFO "md/raid:%s: read error corrected"
" (%lu sectors at %llu on %s)\n",
mdname(conf->mddev), STRIPE_SECTORS,
(unsigned long long)(sh->sector
@@ -1529,7 +1530,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
atomic_inc(&rdev->read_errors);
if (conf->mddev->degraded >= conf->max_degraded)
printk_rl(KERN_WARNING
- "raid5:%s: read error not correctable "
+ "md/raid:%s: read error not correctable "
"(sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)(sh->sector
@@ -1538,7 +1539,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
/* Oh, no!!! */
printk_rl(KERN_WARNING
- "raid5:%s: read error NOT corrected!! "
+ "md/raid:%s: read error NOT corrected!! "
"(sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)(sh->sector
@@ -1547,7 +1548,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
else if (atomic_read(&rdev->read_errors)
> conf->max_nr_stripes)
printk(KERN_WARNING
- "raid5:%s: Too many read errors, failing device %s.\n",
+ "md/raid:%s: Too many read errors, failing device %s.\n",
mdname(conf->mddev), bdn);
else
retry = 1;
@@ -1619,8 +1620,8 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
char b[BDEVNAME_SIZE];
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
- pr_debug("raid5: error called\n");
+ raid5_conf_t *conf = mddev->private;
+ pr_debug("raid456: error called\n");
if (!test_bit(Faulty, &rdev->flags)) {
set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1636,9 +1637,13 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
}
set_bit(Faulty, &rdev->flags);
printk(KERN_ALERT
- "raid5: Disk failure on %s, disabling device.\n"
- "raid5: Operation continuing on %d devices.\n",
- bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
+ "md/raid:%s: Disk failure on %s, disabling device.\n"
+ KERN_ALERT
+ "md/raid:%s: Operation continuing on %d devices.\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b),
+ mdname(mddev),
+ conf->raid_disks - mddev->degraded);
}
}
@@ -1714,8 +1719,6 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
pd_idx = data_disks;
break;
default:
- printk(KERN_ERR "raid5: unsupported algorithm %d\n",
- algorithm);
BUG();
}
break;
@@ -1832,10 +1835,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
qd_idx = raid_disks - 1;
break;
-
default:
- printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
- algorithm);
BUG();
}
break;
@@ -1898,8 +1898,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
case ALGORITHM_PARITY_N:
break;
default:
- printk(KERN_ERR "raid5: unsupported algorithm %d\n",
- algorithm);
BUG();
}
break;
@@ -1958,8 +1956,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
i -= 1;
break;
default:
- printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
- algorithm);
BUG();
}
break;
@@ -1972,7 +1968,8 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
previous, &dummy1, &sh2);
if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
|| sh2.qd_idx != sh->qd_idx) {
- printk(KERN_ERR "compute_blocknr: map not correct\n");
+ printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
+ mdname(conf->mddev));
return 0;
}
return r_sector;
@@ -3709,10 +3706,10 @@ static void raid5_align_endio(struct bio *bi, int error)
bio_put(bi);
- mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
- conf = mddev->private;
rdev = (void*)raid_bi->bi_next;
raid_bi->bi_next = NULL;
+ mddev = rdev->mddev;
+ conf = mddev->private;
rdev_dec_pending(rdev, conf->mddev);
@@ -3749,9 +3746,8 @@ static int bio_fits_rdev(struct bio *bi)
}
-static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
+static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
{
- mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev->private;
int dd_idx;
struct bio* align_bi;
@@ -3866,16 +3862,15 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
return sh;
}
-static int make_request(struct request_queue *q, struct bio * bi)
+static int make_request(mddev_t *mddev, struct bio * bi)
{
- mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev->private;
int dd_idx;
sector_t new_sector;
sector_t logical_sector, last_sector;
struct stripe_head *sh;
const int rw = bio_data_dir(bi);
- int cpu, remaining;
+ int remaining;
if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
/* Drain all pending writes. We only really need
@@ -3890,15 +3885,9 @@ static int make_request(struct request_queue *q, struct bio * bi)
md_write_start(mddev, bi);
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bi));
- part_stat_unlock();
-
if (rw == READ &&
mddev->reshape_position == MaxSector &&
- chunk_aligned_read(q,bi))
+ chunk_aligned_read(mddev,bi))
return 0;
logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
@@ -3946,7 +3935,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
new_sector = raid5_compute_sector(conf, logical_sector,
previous,
&dd_idx, NULL);
- pr_debug("raid5: make_request, sector %llu logical %llu\n",
+ pr_debug("raid456: make_request, sector %llu logical %llu\n",
(unsigned long long)new_sector,
(unsigned long long)logical_sector);
@@ -4054,7 +4043,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
* As the reads complete, handle_stripe will copy the data
* into the destination stripe and release that stripe.
*/
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ raid5_conf_t *conf = mddev->private;
struct stripe_head *sh;
sector_t first_sector, last_sector;
int raid_disks = conf->previous_raid_disks;
@@ -4263,7 +4252,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
/* FIXME go_faster isn't used */
static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ raid5_conf_t *conf = mddev->private;
struct stripe_head *sh;
sector_t max_sector = mddev->dev_sectors;
int sync_blocks;
@@ -4656,7 +4645,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
kfree(percpu->scribble);
pr_err("%s: failed memory allocation for cpu%ld\n",
__func__, cpu);
- return NOTIFY_BAD;
+ return notifier_from_errno(-ENOMEM);
}
break;
case CPU_DEAD:
@@ -4725,7 +4714,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
if (mddev->new_level != 5
&& mddev->new_level != 4
&& mddev->new_level != 6) {
- printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
+ printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
mdname(mddev), mddev->new_level);
return ERR_PTR(-EIO);
}
@@ -4733,12 +4722,12 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
&& !algorithm_valid_raid5(mddev->new_layout)) ||
(mddev->new_level == 6
&& !algorithm_valid_raid6(mddev->new_layout))) {
- printk(KERN_ERR "raid5: %s: layout %d not supported\n",
+ printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
mdname(mddev), mddev->new_layout);
return ERR_PTR(-EIO);
}
if (mddev->new_level == 6 && mddev->raid_disks < 4) {
- printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
+ printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
mdname(mddev), mddev->raid_disks);
return ERR_PTR(-EINVAL);
}
@@ -4746,8 +4735,8 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
if (!mddev->new_chunk_sectors ||
(mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
!is_power_of_2(mddev->new_chunk_sectors)) {
- printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
- mddev->new_chunk_sectors << 9, mdname(mddev));
+ printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
+ mdname(mddev), mddev->new_chunk_sectors << 9);
return ERR_PTR(-EINVAL);
}
@@ -4789,7 +4778,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
if (raid5_alloc_percpu(conf) != 0)
goto abort;
- pr_debug("raid5: run(%s) called.\n", mdname(mddev));
+ pr_debug("raid456: run(%s) called.\n", mdname(mddev));
list_for_each_entry(rdev, &mddev->disks, same_set) {
raid_disk = rdev->raid_disk;
@@ -4802,9 +4791,9 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
if (test_bit(In_sync, &rdev->flags)) {
char b[BDEVNAME_SIZE];
- printk(KERN_INFO "raid5: device %s operational as raid"
- " disk %d\n", bdevname(rdev->bdev,b),
- raid_disk);
+ printk(KERN_INFO "md/raid:%s: device %s operational as raid"
+ " disk %d\n",
+ mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
} else
/* Cannot rely on bitmap to complete recovery */
conf->fullsync = 1;
@@ -4828,16 +4817,17 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
if (grow_stripes(conf, conf->max_nr_stripes)) {
printk(KERN_ERR
- "raid5: couldn't allocate %dkB for buffers\n", memory);
+ "md/raid:%s: couldn't allocate %dkB for buffers\n",
+ mdname(mddev), memory);
goto abort;
} else
- printk(KERN_INFO "raid5: allocated %dkB for %s\n",
- memory, mdname(mddev));
+ printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
+ mdname(mddev), memory);
conf->thread = md_register_thread(raid5d, mddev, NULL);
if (!conf->thread) {
printk(KERN_ERR
- "raid5: couldn't allocate thread for %s\n",
+ "md/raid:%s: couldn't allocate thread.\n",
mdname(mddev));
goto abort;
}
@@ -4888,7 +4878,7 @@ static int run(mddev_t *mddev)
sector_t reshape_offset = 0;
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "raid5: %s is not clean"
+ printk(KERN_NOTICE "md/raid:%s: not clean"
" -- starting background reconstruction\n",
mdname(mddev));
if (mddev->reshape_position != MaxSector) {
@@ -4902,7 +4892,7 @@ static int run(mddev_t *mddev)
int max_degraded = (mddev->level == 6 ? 2 : 1);
if (mddev->new_level != mddev->level) {
- printk(KERN_ERR "raid5: %s: unsupported reshape "
+ printk(KERN_ERR "md/raid:%s: unsupported reshape "
"required - aborting.\n",
mdname(mddev));
return -EINVAL;
@@ -4915,8 +4905,8 @@ static int run(mddev_t *mddev)
here_new = mddev->reshape_position;
if (sector_div(here_new, mddev->new_chunk_sectors *
(mddev->raid_disks - max_degraded))) {
- printk(KERN_ERR "raid5: reshape_position not "
- "on a stripe boundary\n");
+ printk(KERN_ERR "md/raid:%s: reshape_position not "
+ "on a stripe boundary\n", mdname(mddev));
return -EINVAL;
}
reshape_offset = here_new * mddev->new_chunk_sectors;
@@ -4937,8 +4927,9 @@ static int run(mddev_t *mddev)
if ((here_new * mddev->new_chunk_sectors !=
here_old * mddev->chunk_sectors) ||
mddev->ro == 0) {
- printk(KERN_ERR "raid5: in-place reshape must be started"
- " in read-only mode - aborting\n");
+ printk(KERN_ERR "md/raid:%s: in-place reshape must be started"
+ " in read-only mode - aborting\n",
+ mdname(mddev));
return -EINVAL;
}
} else if (mddev->delta_disks < 0
@@ -4947,11 +4938,13 @@ static int run(mddev_t *mddev)
: (here_new * mddev->new_chunk_sectors >=
here_old * mddev->chunk_sectors)) {
/* Reading from the same stripe as writing to - bad */
- printk(KERN_ERR "raid5: reshape_position too early for "
- "auto-recovery - aborting.\n");
+ printk(KERN_ERR "md/raid:%s: reshape_position too early for "
+ "auto-recovery - aborting.\n",
+ mdname(mddev));
return -EINVAL;
}
- printk(KERN_INFO "raid5: reshape will continue\n");
+ printk(KERN_INFO "md/raid:%s: reshape will continue\n",
+ mdname(mddev));
/* OK, we should be able to continue; */
} else {
BUG_ON(mddev->level != mddev->new_level);
@@ -4993,18 +4986,6 @@ static int run(mddev_t *mddev)
mddev->minor_version > 90)
rdev->recovery_offset = reshape_offset;
- printk("%d: w=%d pa=%d pr=%d m=%d a=%d r=%d op1=%d op2=%d\n",
- rdev->raid_disk, working_disks, conf->prev_algo,
- conf->previous_raid_disks, conf->max_degraded,
- conf->algorithm, conf->raid_disks,
- only_parity(rdev->raid_disk,
- conf->prev_algo,
- conf->previous_raid_disks,
- conf->max_degraded),
- only_parity(rdev->raid_disk,
- conf->algorithm,
- conf->raid_disks,
- conf->max_degraded));
if (rdev->recovery_offset < reshape_offset) {
/* We need to check old and new layout */
if (!only_parity(rdev->raid_disk,
@@ -5025,7 +5006,7 @@ static int run(mddev_t *mddev)
- working_disks);
if (mddev->degraded > conf->max_degraded) {
- printk(KERN_ERR "raid5: not enough operational devices for %s"
+ printk(KERN_ERR "md/raid:%s: not enough operational devices"
" (%d/%d failed)\n",
mdname(mddev), mddev->degraded, conf->raid_disks);
goto abort;
@@ -5039,32 +5020,32 @@ static int run(mddev_t *mddev)
mddev->recovery_cp != MaxSector) {
if (mddev->ok_start_degraded)
printk(KERN_WARNING
- "raid5: starting dirty degraded array: %s"
- "- data corruption possible.\n",
+ "md/raid:%s: starting dirty degraded array"
+ " - data corruption possible.\n",
mdname(mddev));
else {
printk(KERN_ERR
- "raid5: cannot start dirty degraded array for %s\n",
+ "md/raid:%s: cannot start dirty degraded array.\n",
mdname(mddev));
goto abort;
}
}
if (mddev->degraded == 0)
- printk("raid5: raid level %d set %s active with %d out of %d"
- " devices, algorithm %d\n", conf->level, mdname(mddev),
+ printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
+ " devices, algorithm %d\n", mdname(mddev), conf->level,
mddev->raid_disks-mddev->degraded, mddev->raid_disks,
mddev->new_layout);
else
- printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
- " out of %d devices, algorithm %d\n", conf->level,
- mdname(mddev), mddev->raid_disks - mddev->degraded,
- mddev->raid_disks, mddev->new_layout);
+ printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
+ " out of %d devices, algorithm %d\n",
+ mdname(mddev), conf->level,
+ mddev->raid_disks - mddev->degraded,
+ mddev->raid_disks, mddev->new_layout);
print_raid5_conf(conf);
if (conf->reshape_progress != MaxSector) {
- printk("...ok start reshape thread\n");
conf->reshape_safe = conf->reshape_progress;
atomic_set(&conf->reshape_stripes, 0);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
@@ -5087,9 +5068,11 @@ static int run(mddev_t *mddev)
}
/* Ok, everything is just fine now */
- if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
+ if (mddev->to_remove == &raid5_attrs_group)
+ mddev->to_remove = NULL;
+ else if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
printk(KERN_WARNING
- "raid5: failed to create sysfs attributes for %s\n",
+ "md/raid:%s: failed to create sysfs attributes.\n",
mdname(mddev));
mddev->queue->queue_lock = &conf->device_lock;
@@ -5119,22 +5102,21 @@ abort:
free_conf(conf);
}
mddev->private = NULL;
- printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
+ printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
return -EIO;
}
-
-
static int stop(mddev_t *mddev)
{
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ raid5_conf_t *conf = mddev->private;
md_unregister_thread(mddev->thread);
mddev->thread = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
free_conf(conf);
- mddev->private = &raid5_attrs_group;
+ mddev->private = NULL;
+ mddev->to_remove = &raid5_attrs_group;
return 0;
}
@@ -5175,7 +5157,7 @@ static void printall(struct seq_file *seq, raid5_conf_t *conf)
static void status(struct seq_file *seq, mddev_t *mddev)
{
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ raid5_conf_t *conf = mddev->private;
int i;
seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
@@ -5197,21 +5179,22 @@ static void print_raid5_conf (raid5_conf_t *conf)
int i;
struct disk_info *tmp;
- printk("RAID5 conf printout:\n");
+ printk(KERN_DEBUG "RAID conf printout:\n");
if (!conf) {
printk("(conf==NULL)\n");
return;
}
- printk(" --- rd:%d wd:%d\n", conf->raid_disks,
- conf->raid_disks - conf->mddev->degraded);
+ printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
+ conf->raid_disks,
+ conf->raid_disks - conf->mddev->degraded);
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
tmp = conf->disks + i;
if (tmp->rdev)
- printk(" disk %d, o:%d, dev:%s\n",
- i, !test_bit(Faulty, &tmp->rdev->flags),
- bdevname(tmp->rdev->bdev,b));
+ printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
+ i, !test_bit(Faulty, &tmp->rdev->flags),
+ bdevname(tmp->rdev->bdev, b));
}
}
@@ -5334,7 +5317,6 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
raid5_size(mddev, sectors, mddev->raid_disks))
return -EINVAL;
set_capacity(mddev->gendisk, mddev->array_sectors);
- mddev->changed = 1;
revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
mddev->recovery_cp = mddev->dev_sectors;
@@ -5360,7 +5342,8 @@ static int check_stripe_cache(mddev_t *mddev)
> conf->max_nr_stripes ||
((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
> conf->max_nr_stripes) {
- printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
+ printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
+ mdname(mddev),
((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
/ STRIPE_SIZE)*4);
return 0;
@@ -5431,7 +5414,7 @@ static int raid5_start_reshape(mddev_t *mddev)
*/
if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
< mddev->array_sectors) {
- printk(KERN_ERR "md: %s: array size must be reduced "
+ printk(KERN_ERR "md/raid:%s: array size must be reduced "
"before number of disks\n", mdname(mddev));
return -EINVAL;
}
@@ -5469,9 +5452,9 @@ static int raid5_start_reshape(mddev_t *mddev)
if (sysfs_create_link(&mddev->kobj,
&rdev->kobj, nm))
printk(KERN_WARNING
- "raid5: failed to create "
- " link %s for %s\n",
- nm, mdname(mddev));
+ "md/raid:%s: failed to create "
+ " link %s\n",
+ mdname(mddev), nm);
} else
break;
}
@@ -5548,7 +5531,6 @@ static void raid5_finish_reshape(mddev_t *mddev)
if (mddev->delta_disks > 0) {
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
set_capacity(mddev->gendisk, mddev->array_sectors);
- mddev->changed = 1;
revalidate_disk(mddev->gendisk);
} else {
int d;
@@ -5613,6 +5595,29 @@ static void raid5_quiesce(mddev_t *mddev, int state)
}
+static void *raid45_takeover_raid0(mddev_t *mddev, int level)
+{
+ struct raid0_private_data *raid0_priv = mddev->private;
+
+ /* for raid0 takeover only one zone is supported */
+ if (raid0_priv->nr_strip_zones > 1) {
+ printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ mddev->new_level = level;
+ mddev->new_layout = ALGORITHM_PARITY_N;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->raid_disks += 1;
+ mddev->delta_disks = 1;
+ /* make sure it will be not marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ return setup_conf(mddev);
+}
+
+
static void *raid5_takeover_raid1(mddev_t *mddev)
{
int chunksect;
@@ -5737,12 +5742,13 @@ static int raid6_check_reshape(mddev_t *mddev)
static void *raid5_takeover(mddev_t *mddev)
{
/* raid5 can take over:
- * raid0 - if all devices are the same - make it a raid4 layout
+ * raid0 - if there is only one strip zone - make it a raid4 layout
* raid1 - if there are two drives. We need to know the chunk size
* raid4 - trivial - just use a raid4 layout.
* raid6 - Providing it is a *_6 layout
*/
-
+ if (mddev->level == 0)
+ return raid45_takeover_raid0(mddev, 5);
if (mddev->level == 1)
return raid5_takeover_raid1(mddev);
if (mddev->level == 4) {
@@ -5756,6 +5762,22 @@ static void *raid5_takeover(mddev_t *mddev)
return ERR_PTR(-EINVAL);
}
+static void *raid4_takeover(mddev_t *mddev)
+{
+ /* raid4 can take over:
+ * raid0 - if there is only one strip zone
+ * raid5 - if layout is right
+ */
+ if (mddev->level == 0)
+ return raid45_takeover_raid0(mddev, 4);
+ if (mddev->level == 5 &&
+ mddev->layout == ALGORITHM_PARITY_N) {
+ mddev->new_layout = 0;
+ mddev->new_level = 4;
+ return setup_conf(mddev);
+ }
+ return ERR_PTR(-EINVAL);
+}
static struct mdk_personality raid5_personality;
@@ -5871,6 +5893,7 @@ static struct mdk_personality raid4_personality =
.start_reshape = raid5_start_reshape,
.finish_reshape = raid5_finish_reshape,
.quiesce = raid5_quiesce,
+ .takeover = raid4_takeover,
};
static int __init raid5_init(void)
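
The raid5.c hunks above add in-place level conversion from raid0: both raid5_takeover() and the new raid4_takeover() route single-zone raid0 arrays through raid45_takeover_raid0(), which bumps raid_disks by one (the missing parity device), marks the array clean (recovery_cp = MaxSector) and reuses setup_conf(). Below is a minimal sketch, not the real md core code, of how a caller-side dispatch over a ->takeover hook works; the struct and helper names are simplified stand-ins invented for illustration.

/* Sketch only: simplified stand-ins for the md structures involved. */
#include <linux/errno.h>
#include <linux/err.h>

struct mddev_sketch {
	int level;          /* current personality level (0, 1, 4, 5, 6) */
	int new_level;      /* level requested by the takeover */
	void *private;      /* per-personality configuration */
};

struct personality_sketch {
	int level;
	/* returns a new private conf on success, ERR_PTR() on failure */
	void *(*takeover)(struct mddev_sketch *mddev);
};

/*
 * Hypothetical helper showing the pattern: ask the target personality
 * whether it can take over the current layout, and only switch the
 * array's private conf and level if it says yes.
 */
static int try_takeover(struct mddev_sketch *mddev,
			struct personality_sketch *target)
{
	void *conf;

	if (!target->takeover)
		return -EINVAL;        /* personality cannot convert in place */

	conf = target->takeover(mddev);
	if (IS_ERR(conf))
		return PTR_ERR(conf);  /* e.g. raid0 with more than one strip zone */

	mddev->private = conf;
	mddev->level = target->level;
	return 0;
}

With the hunks above applied, such a dispatch succeeds for a raid0 array with exactly one strip zone, because raid45_takeover_raid0() refuses multi-zone layouts and reports the added parity device as already in sync.
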
diff --git a/drivers/media/IR/Kconfig b/drivers/media/IR/Kconfig
index 195c6cf359f6..d22a8ec523fc 100644
--- a/drivers/media/IR/Kconfig
+++ b/drivers/media/IR/Kconfig
@@ -13,6 +13,7 @@ source "drivers/media/IR/keymaps/Kconfig"
config IR_NEC_DECODER
tristate "Enable IR raw decoder for the NEC protocol"
depends on IR_CORE
+ select BITREVERSE
default y
---help---
@@ -22,6 +23,7 @@ config IR_NEC_DECODER
config IR_RC5_DECODER
tristate "Enable IR raw decoder for the RC-5 protocol"
depends on IR_CORE
+ select BITREVERSE
default y
---help---
diff --git a/drivers/media/IR/imon.c b/drivers/media/IR/imon.c
index 5e2045670004..4bbd45f4284c 100644
--- a/drivers/media/IR/imon.c
+++ b/drivers/media/IR/imon.c
@@ -94,6 +94,7 @@ struct imon_context {
bool display_supported; /* not all controllers do */
bool display_isopen; /* display port has been opened */
+ bool rf_device; /* true if iMON 2.4G LT/DT RF device */
bool rf_isassociating; /* RF remote associating */
bool dev_present_intf0; /* USB device presence, interface 0 */
bool dev_present_intf1; /* USB device presence, interface 1 */
@@ -385,7 +386,7 @@ static int display_open(struct inode *inode, struct file *file)
err("%s: display port is already open", __func__);
retval = -EBUSY;
} else {
- ictx->display_isopen = 1;
+ ictx->display_isopen = true;
file->private_data = ictx;
dev_dbg(ictx->dev, "display port opened\n");
}
@@ -422,7 +423,7 @@ static int display_close(struct inode *inode, struct file *file)
err("%s: display is not open", __func__);
retval = -EIO;
} else {
- ictx->display_isopen = 0;
+ ictx->display_isopen = false;
dev_dbg(ictx->dev, "display port closed\n");
if (!ictx->dev_present_intf0) {
/*
@@ -491,12 +492,12 @@ static int send_packet(struct imon_context *ictx)
}
init_completion(&ictx->tx.finished);
- ictx->tx.busy = 1;
+ ictx->tx.busy = true;
smp_rmb(); /* ensure later readers know we're busy */
retval = usb_submit_urb(ictx->tx_urb, GFP_KERNEL);
if (retval) {
- ictx->tx.busy = 0;
+ ictx->tx.busy = false;
smp_rmb(); /* ensure later readers know we're not busy */
err("%s: error submitting urb(%d)", __func__, retval);
} else {
@@ -682,7 +683,7 @@ static ssize_t store_associate_remote(struct device *d,
return -ENODEV;
mutex_lock(&ictx->lock);
- ictx->rf_isassociating = 1;
+ ictx->rf_isassociating = true;
send_associate_24g(ictx);
mutex_unlock(&ictx->lock);
@@ -950,7 +951,7 @@ static void usb_tx_callback(struct urb *urb)
ictx->tx.status = urb->status;
/* notify waiters that write has finished */
- ictx->tx.busy = 0;
+ ictx->tx.busy = false;
smp_rmb(); /* ensure later readers know we're not busy */
complete(&ictx->tx.finished);
}
@@ -1215,7 +1216,7 @@ static bool imon_mouse_event(struct imon_context *ictx,
{
char rel_x = 0x00, rel_y = 0x00;
u8 right_shift = 1;
- bool mouse_input = 1;
+ bool mouse_input = true;
int dir = 0;
/* newer iMON device PAD or mouse button */
@@ -1246,7 +1247,7 @@ static bool imon_mouse_event(struct imon_context *ictx,
} else if (ictx->kc == KEY_CHANNELDOWN && (buf[2] & 0x40) != 0x40) {
dir = -1;
} else
- mouse_input = 0;
+ mouse_input = false;
if (mouse_input) {
dev_dbg(ictx->dev, "sending mouse data via input subsystem\n");
@@ -1450,7 +1451,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
unsigned char *buf = urb->transfer_buffer;
struct device *dev = ictx->dev;
u32 kc;
- bool norelease = 0;
+ bool norelease = false;
int i;
u64 temp_key;
u64 panel_key = 0;
@@ -1465,7 +1466,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
idev = ictx->idev;
/* filter out junk data on the older 0xffdc imon devices */
- if ((buf[0] == 0xff) && (buf[7] == 0xff))
+ if ((buf[0] == 0xff) && (buf[1] == 0xff) && (buf[2] == 0xff))
return;
/* Figure out what key was pressed */
@@ -1517,7 +1518,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
!(buf[1] & 0x1 || buf[1] >> 2 & 0x1))) {
len = 8;
imon_pad_to_keys(ictx, buf);
- norelease = 1;
+ norelease = true;
}
if (debug) {
@@ -1580,7 +1581,7 @@ not_input_data:
(buf[6] == 0x5E && buf[7] == 0xDF))) { /* DT */
dev_warn(dev, "%s: remote associated refid=%02X\n",
__func__, buf[1]);
- ictx->rf_isassociating = 0;
+ ictx->rf_isassociating = false;
}
}
@@ -1790,9 +1791,9 @@ static bool imon_find_endpoints(struct imon_context *ictx,
int ifnum = iface_desc->desc.bInterfaceNumber;
int num_endpts = iface_desc->desc.bNumEndpoints;
int i, ep_dir, ep_type;
- bool ir_ep_found = 0;
- bool display_ep_found = 0;
- bool tx_control = 0;
+ bool ir_ep_found = false;
+ bool display_ep_found = false;
+ bool tx_control = false;
/*
* Scan the endpoint list and set:
@@ -1808,13 +1809,13 @@ static bool imon_find_endpoints(struct imon_context *ictx,
ep_type == USB_ENDPOINT_XFER_INT) {
rx_endpoint = ep;
- ir_ep_found = 1;
+ ir_ep_found = true;
dev_dbg(ictx->dev, "%s: found IR endpoint\n", __func__);
} else if (!display_ep_found && ep_dir == USB_DIR_OUT &&
ep_type == USB_ENDPOINT_XFER_INT) {
tx_endpoint = ep;
- display_ep_found = 1;
+ display_ep_found = true;
dev_dbg(ictx->dev, "%s: found display endpoint\n", __func__);
}
}
@@ -1835,8 +1836,8 @@ static bool imon_find_endpoints(struct imon_context *ictx,
* newer iMON devices that use control urb instead of interrupt
*/
if (!display_ep_found) {
- tx_control = 1;
- display_ep_found = 1;
+ tx_control = true;
+ display_ep_found = true;
dev_dbg(ictx->dev, "%s: device uses control endpoint, not "
"interface OUT endpoint\n", __func__);
}
@@ -1847,7 +1848,7 @@ static bool imon_find_endpoints(struct imon_context *ictx,
* and without... :\
*/
if (ictx->display_type == IMON_DISPLAY_TYPE_NONE) {
- display_ep_found = 0;
+ display_ep_found = false;
dev_dbg(ictx->dev, "%s: device has no display\n", __func__);
}
@@ -1856,7 +1857,7 @@ static bool imon_find_endpoints(struct imon_context *ictx,
* that refers to e.g. /dev/lcd0 (a character device LCD or VFD).
*/
if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
- display_ep_found = 0;
+ display_ep_found = false;
dev_dbg(ictx->dev, "%s: iMON Touch device found\n", __func__);
}
@@ -1905,9 +1906,10 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
ictx->dev = dev;
ictx->usbdev_intf0 = usb_get_dev(interface_to_usbdev(intf));
- ictx->dev_present_intf0 = 1;
+ ictx->dev_present_intf0 = true;
ictx->rx_urb_intf0 = rx_urb;
ictx->tx_urb = tx_urb;
+ ictx->rf_device = false;
ictx->vendor = le16_to_cpu(ictx->usbdev_intf0->descriptor.idVendor);
ictx->product = le16_to_cpu(ictx->usbdev_intf0->descriptor.idProduct);
@@ -1979,7 +1981,7 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf,
}
ictx->usbdev_intf1 = usb_get_dev(interface_to_usbdev(intf));
- ictx->dev_present_intf1 = 1;
+ ictx->dev_present_intf1 = true;
ictx->rx_urb_intf1 = rx_urb;
ret = -ENODEV;
@@ -2047,6 +2049,12 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
dev_info(ictx->dev, "0xffdc iMON Knob, iMON IR");
ictx->display_supported = false;
break;
+ /* iMON 2.4G LT (usb stick), no display, iMON RF */
+ case 0x4e:
+ dev_info(ictx->dev, "0xffdc iMON 2.4G LT, iMON RF");
+ ictx->display_supported = false;
+ ictx->rf_device = true;
+ break;
/* iMON VFD, no IR (does have vol knob tho) */
case 0x35:
dev_info(ictx->dev, "0xffdc iMON VFD + knob, no IR");
@@ -2197,15 +2205,6 @@ static int __devinit imon_probe(struct usb_interface *interface,
goto fail;
}
- if (product == 0xffdc) {
- /* RF products *also* use 0xffdc... sigh... */
- sysfs_err = sysfs_create_group(&interface->dev.kobj,
- &imon_rf_attribute_group);
- if (sysfs_err)
- err("%s: Could not create RF sysfs entries(%d)",
- __func__, sysfs_err);
- }
-
} else {
/* this is the secondary interface on the device */
ictx = imon_init_intf1(interface, first_if_ctx);
@@ -2233,6 +2232,14 @@ static int __devinit imon_probe(struct usb_interface *interface,
imon_set_display_type(ictx, interface);
+ if (product == 0xffdc && ictx->rf_device) {
+ sysfs_err = sysfs_create_group(&interface->dev.kobj,
+ &imon_rf_attribute_group);
+ if (sysfs_err)
+ err("%s: Could not create RF sysfs entries(%d)",
+ __func__, sysfs_err);
+ }
+
if (ictx->display_supported)
imon_init_display(ictx, interface);
}
@@ -2297,7 +2304,7 @@ static void __devexit imon_disconnect(struct usb_interface *interface)
}
if (ifnum == 0) {
- ictx->dev_present_intf0 = 0;
+ ictx->dev_present_intf0 = false;
usb_kill_urb(ictx->rx_urb_intf0);
input_unregister_device(ictx->idev);
if (ictx->display_supported) {
@@ -2307,7 +2314,7 @@ static void __devexit imon_disconnect(struct usb_interface *interface)
usb_deregister_dev(interface, &imon_vfd_class);
}
} else {
- ictx->dev_present_intf1 = 0;
+ ictx->dev_present_intf1 = false;
usb_kill_urb(ictx->rx_urb_intf1);
if (ictx->display_type == IMON_DISPLAY_TYPE_VGA)
input_unregister_device(ictx->touch);
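
The imon.c changes above do two things: the int-valued flags become proper bool assignments, and creation of the RF-association sysfs group is deferred until imon_get_ffdc_type() has positively identified an iMON 2.4G LT/DT (the new rf_device flag), instead of being created for every 0xffdc product at probe time. A condensed sketch of that detect-then-register ordering follows; the sketch_* names are hypothetical stand-ins for the driver internals, not functions from imon.c.

/* Sketch only: illustrates the probe-time ordering, not the real driver. */
#include <linux/types.h>
#include <linux/sysfs.h>
#include <linux/usb.h>

struct imon_sketch {
	bool rf_device;                 /* set while parsing the 0xffdc config byte */
	struct usb_interface *intf;
};

/* stand-in for imon_get_ffdc_type(): detect the device before exposing sysfs */
static void sketch_detect(struct imon_sketch *ctx, u8 config_byte)
{
	if (config_byte == 0x4e)        /* iMON 2.4G LT RF stick */
		ctx->rf_device = true;
}

static int sketch_probe_tail(struct imon_sketch *ctx, u16 product,
			     const struct attribute_group *rf_group)
{
	/* only RF-capable 0xffdc devices get the associate_remote attribute */
	if (product == 0xffdc && ctx->rf_device)
		return sysfs_create_group(&ctx->intf->dev.kobj, rf_group);

	return 0;
}
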
diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c
index 9374a006f43d..94a8577e72eb 100644
--- a/drivers/media/IR/ir-keytable.c
+++ b/drivers/media/IR/ir-keytable.c
@@ -490,11 +490,12 @@ int __ir_input_register(struct input_dev *input_dev,
if (rc < 0)
goto out_table;
- if (ir_dev->props->driver_type == RC_DRIVER_IR_RAW) {
- rc = ir_raw_event_register(input_dev);
- if (rc < 0)
- goto out_event;
- }
+ if (ir_dev->props)
+ if (ir_dev->props->driver_type == RC_DRIVER_IR_RAW) {
+ rc = ir_raw_event_register(input_dev);
+ if (rc < 0)
+ goto out_event;
+ }
IR_dprintk(1, "Registered input device on %s for %s remote.\n",
driver_name, rc_tab->name);
@@ -530,8 +531,10 @@ void ir_input_unregister(struct input_dev *input_dev)
IR_dprintk(1, "Freed keycode table\n");
del_timer_sync(&ir_dev->timer_keyup);
- if (ir_dev->props->driver_type == RC_DRIVER_IR_RAW)
- ir_raw_event_unregister(input_dev);
+ if (ir_dev->props)
+ if (ir_dev->props->driver_type == RC_DRIVER_IR_RAW)
+ ir_raw_event_unregister(input_dev);
+
rc_tab = &ir_dev->rc_tab;
rc_tab->size = 0;
kfree(rc_tab->scan);
diff --git a/drivers/media/IR/ir-sysfs.c b/drivers/media/IR/ir-sysfs.c
index d7da63e16c92..2098dd1488e0 100644
--- a/drivers/media/IR/ir-sysfs.c
+++ b/drivers/media/IR/ir-sysfs.c
@@ -221,9 +221,10 @@ int ir_register_class(struct input_dev *input_dev)
if (unlikely(devno < 0))
return devno;
- if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE)
- ir_dev->dev.type = &rc_dev_type;
- else
+ if (ir_dev->props) {
+ if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE)
+ ir_dev->dev.type = &rc_dev_type;
+ } else
ir_dev->dev.type = &ir_raw_dev_type;
ir_dev->dev.class = &ir_input_class;
diff --git a/drivers/media/IR/keymaps/Makefile b/drivers/media/IR/keymaps/Makefile
index ec25258a955f..aea649fbcf5a 100644
--- a/drivers/media/IR/keymaps/Makefile
+++ b/drivers/media/IR/keymaps/Makefile
@@ -6,7 +6,8 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-avermedia.o \
rc-avermedia-cardbus.o \
rc-avermedia-dvbt.o \
- rc-avermedia-m135a-rm-jx.o \
+ rc-avermedia-m135a.o \
+ rc-avermedia-m733a-rm-k6.o \
rc-avertv-303.o \
rc-behold.o \
rc-behold-columbus.o \
diff --git a/drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c b/drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c
deleted file mode 100644
index 101e7ea85941..000000000000
--- a/drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/* avermedia-m135a-rm-jx.h - Keytable for avermedia_m135a_rm_jx Remote Controller
- *
- * keymap imported from ir-keymaps.c
- *
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <media/rc-map.h>
-
-/*
- * Avermedia M135A with IR model RM-JX
- * The same codes exist on both Positivo (BR) and original IR
- * Mauro Carvalho Chehab <mchehab@infradead.org>
- */
-
-static struct ir_scancode avermedia_m135a_rm_jx[] = {
- { 0x0200, KEY_POWER2 },
- { 0x022e, KEY_DOT }, /* '.' */
- { 0x0201, KEY_MODE }, /* TV/FM or SOURCE */
-
- { 0x0205, KEY_1 },
- { 0x0206, KEY_2 },
- { 0x0207, KEY_3 },
- { 0x0209, KEY_4 },
- { 0x020a, KEY_5 },
- { 0x020b, KEY_6 },
- { 0x020d, KEY_7 },
- { 0x020e, KEY_8 },
- { 0x020f, KEY_9 },
- { 0x0211, KEY_0 },
-
- { 0x0213, KEY_RIGHT }, /* -> or L */
- { 0x0212, KEY_LEFT }, /* <- or R */
-
- { 0x0217, KEY_SLEEP }, /* Capturar Imagem or Snapshot */
- { 0x0210, KEY_SHUFFLE }, /* Amostra or 16 chan prev */
-
- { 0x0303, KEY_CHANNELUP },
- { 0x0302, KEY_CHANNELDOWN },
- { 0x021f, KEY_VOLUMEUP },
- { 0x021e, KEY_VOLUMEDOWN },
- { 0x020c, KEY_ENTER }, /* Full Screen */
-
- { 0x0214, KEY_MUTE },
- { 0x0208, KEY_AUDIO },
-
- { 0x0203, KEY_TEXT }, /* Teletext */
- { 0x0204, KEY_EPG },
- { 0x022b, KEY_TV2 }, /* TV2 or PIP */
-
- { 0x021d, KEY_RED },
- { 0x021c, KEY_YELLOW },
- { 0x0301, KEY_GREEN },
- { 0x0300, KEY_BLUE },
-
- { 0x021a, KEY_PLAYPAUSE },
- { 0x0219, KEY_RECORD },
- { 0x0218, KEY_PLAY },
- { 0x021b, KEY_STOP },
-};
-
-static struct rc_keymap avermedia_m135a_rm_jx_map = {
- .map = {
- .scan = avermedia_m135a_rm_jx,
- .size = ARRAY_SIZE(avermedia_m135a_rm_jx),
- .ir_type = IR_TYPE_NEC,
- .name = RC_MAP_AVERMEDIA_M135A_RM_JX,
- }
-};
-
-static int __init init_rc_map_avermedia_m135a_rm_jx(void)
-{
- return ir_register_map(&avermedia_m135a_rm_jx_map);
-}
-
-static void __exit exit_rc_map_avermedia_m135a_rm_jx(void)
-{
- ir_unregister_map(&avermedia_m135a_rm_jx_map);
-}
-
-module_init(init_rc_map_avermedia_m135a_rm_jx)
-module_exit(exit_rc_map_avermedia_m135a_rm_jx)
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avermedia-m135a.c b/drivers/media/IR/keymaps/rc-avermedia-m135a.c
new file mode 100644
index 000000000000..e4471fb2ad1e
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia-m135a.c
@@ -0,0 +1,147 @@
+/* avermedia-m135a.c - Keytable for Avermedia M135A Remote Controllers
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (c) 2010 by Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Avermedia M135A with RM-JX and RM-K6 remote controls
+ *
+ * On Avermedia M135A with IR model RM-JX, the same codes exist on both
+ * Positivo (BR) and original IR, initial version and remote control codes
+ * added by Mauro Carvalho Chehab <mchehab@infradead.org>
+ *
+ * Positivo also ships Avermedia M135A with model RM-K6, extra control
+ * codes added by Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+ */
+
+static struct ir_scancode avermedia_m135a[] = {
+ /* RM-JX */
+ { 0x0200, KEY_POWER2 },
+ { 0x022e, KEY_DOT }, /* '.' */
+ { 0x0201, KEY_MODE }, /* TV/FM or SOURCE */
+
+ { 0x0205, KEY_1 },
+ { 0x0206, KEY_2 },
+ { 0x0207, KEY_3 },
+ { 0x0209, KEY_4 },
+ { 0x020a, KEY_5 },
+ { 0x020b, KEY_6 },
+ { 0x020d, KEY_7 },
+ { 0x020e, KEY_8 },
+ { 0x020f, KEY_9 },
+ { 0x0211, KEY_0 },
+
+ { 0x0213, KEY_RIGHT }, /* -> or L */
+ { 0x0212, KEY_LEFT }, /* <- or R */
+
+ { 0x0217, KEY_SLEEP }, /* Capturar Imagem or Snapshot */
+ { 0x0210, KEY_SHUFFLE }, /* Amostra or 16 chan prev */
+
+ { 0x0303, KEY_CHANNELUP },
+ { 0x0302, KEY_CHANNELDOWN },
+ { 0x021f, KEY_VOLUMEUP },
+ { 0x021e, KEY_VOLUMEDOWN },
+ { 0x020c, KEY_ENTER }, /* Full Screen */
+
+ { 0x0214, KEY_MUTE },
+ { 0x0208, KEY_AUDIO },
+
+ { 0x0203, KEY_TEXT }, /* Teletext */
+ { 0x0204, KEY_EPG },
+ { 0x022b, KEY_TV2 }, /* TV2 or PIP */
+
+ { 0x021d, KEY_RED },
+ { 0x021c, KEY_YELLOW },
+ { 0x0301, KEY_GREEN },
+ { 0x0300, KEY_BLUE },
+
+ { 0x021a, KEY_PLAYPAUSE },
+ { 0x0219, KEY_RECORD },
+ { 0x0218, KEY_PLAY },
+ { 0x021b, KEY_STOP },
+
+ /* RM-K6 */
+ { 0x0401, KEY_POWER2 },
+ { 0x0406, KEY_MUTE },
+ { 0x0408, KEY_MODE }, /* TV/FM */
+
+ { 0x0409, KEY_1 },
+ { 0x040a, KEY_2 },
+ { 0x040b, KEY_3 },
+ { 0x040c, KEY_4 },
+ { 0x040d, KEY_5 },
+ { 0x040e, KEY_6 },
+ { 0x040f, KEY_7 },
+ { 0x0410, KEY_8 },
+ { 0x0411, KEY_9 },
+ { 0x044c, KEY_DOT }, /* '.' */
+ { 0x0412, KEY_0 },
+ { 0x0407, KEY_REFRESH }, /* Refresh/Reload */
+
+ { 0x0413, KEY_AUDIO },
+ { 0x0440, KEY_SCREEN }, /* Full Screen toggle */
+ { 0x0441, KEY_HOME },
+ { 0x0442, KEY_BACK },
+ { 0x0447, KEY_UP },
+ { 0x0448, KEY_DOWN },
+ { 0x0449, KEY_LEFT },
+ { 0x044a, KEY_RIGHT },
+ { 0x044b, KEY_OK },
+ { 0x0404, KEY_VOLUMEUP },
+ { 0x0405, KEY_VOLUMEDOWN },
+ { 0x0402, KEY_CHANNELUP },
+ { 0x0403, KEY_CHANNELDOWN },
+
+ { 0x0443, KEY_RED },
+ { 0x0444, KEY_GREEN },
+ { 0x0445, KEY_YELLOW },
+ { 0x0446, KEY_BLUE },
+
+ { 0x0414, KEY_TEXT },
+ { 0x0415, KEY_EPG },
+ { 0x041a, KEY_TV2 }, /* PIP */
+ { 0x041b, KEY_MHP }, /* Snapshot */
+
+ { 0x0417, KEY_RECORD },
+ { 0x0416, KEY_PLAYPAUSE },
+ { 0x0418, KEY_STOP },
+ { 0x0419, KEY_PAUSE },
+
+ { 0x041f, KEY_PREVIOUS },
+ { 0x041c, KEY_REWIND },
+ { 0x041d, KEY_FORWARD },
+ { 0x041e, KEY_NEXT },
+};
+
+static struct rc_keymap avermedia_m135a_map = {
+ .map = {
+ .scan = avermedia_m135a,
+ .size = ARRAY_SIZE(avermedia_m135a),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_AVERMEDIA_M135A,
+ }
+};
+
+static int __init init_rc_map_avermedia_m135a(void)
+{
+ return ir_register_map(&avermedia_m135a_map);
+}
+
+static void __exit exit_rc_map_avermedia_m135a(void)
+{
+ ir_unregister_map(&avermedia_m135a_map);
+}
+
+module_init(init_rc_map_avermedia_m135a)
+module_exit(exit_rc_map_avermedia_m135a)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avermedia-m733a-rm-k6.c b/drivers/media/IR/keymaps/rc-avermedia-m733a-rm-k6.c
new file mode 100644
index 000000000000..cf8d45717cb3
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia-m733a-rm-k6.c
@@ -0,0 +1,95 @@
+/* avermedia-m733a-rm-k6.h - Keytable for avermedia_m733a_rm_k6 Remote Controller
+ *
+ * Copyright (c) 2010 by Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Avermedia M733A with IR model RM-K6
+ * This is the stock remote controller used with Positivo machines with M733A
+ * Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+ */
+
+static struct ir_scancode avermedia_m733a_rm_k6[] = {
+ { 0x0401, KEY_POWER2 },
+ { 0x0406, KEY_MUTE },
+ { 0x0408, KEY_MODE }, /* TV/FM */
+
+ { 0x0409, KEY_1 },
+ { 0x040a, KEY_2 },
+ { 0x040b, KEY_3 },
+ { 0x040c, KEY_4 },
+ { 0x040d, KEY_5 },
+ { 0x040e, KEY_6 },
+ { 0x040f, KEY_7 },
+ { 0x0410, KEY_8 },
+ { 0x0411, KEY_9 },
+ { 0x044c, KEY_DOT }, /* '.' */
+ { 0x0412, KEY_0 },
+ { 0x0407, KEY_REFRESH }, /* Refresh/Reload */
+
+ { 0x0413, KEY_AUDIO },
+ { 0x0440, KEY_SCREEN }, /* Full Screen toggle */
+ { 0x0441, KEY_HOME },
+ { 0x0442, KEY_BACK },
+ { 0x0447, KEY_UP },
+ { 0x0448, KEY_DOWN },
+ { 0x0449, KEY_LEFT },
+ { 0x044a, KEY_RIGHT },
+ { 0x044b, KEY_OK },
+ { 0x0404, KEY_VOLUMEUP },
+ { 0x0405, KEY_VOLUMEDOWN },
+ { 0x0402, KEY_CHANNELUP },
+ { 0x0403, KEY_CHANNELDOWN },
+
+ { 0x0443, KEY_RED },
+ { 0x0444, KEY_GREEN },
+ { 0x0445, KEY_YELLOW },
+ { 0x0446, KEY_BLUE },
+
+ { 0x0414, KEY_TEXT },
+ { 0x0415, KEY_EPG },
+ { 0x041a, KEY_TV2 }, /* PIP */
+ { 0x041b, KEY_MHP }, /* Snapshot */
+
+ { 0x0417, KEY_RECORD },
+ { 0x0416, KEY_PLAYPAUSE },
+ { 0x0418, KEY_STOP },
+ { 0x0419, KEY_PAUSE },
+
+ { 0x041f, KEY_PREVIOUS },
+ { 0x041c, KEY_REWIND },
+ { 0x041d, KEY_FORWARD },
+ { 0x041e, KEY_NEXT },
+};
+
+static struct rc_keymap avermedia_m733a_rm_k6_map = {
+ .map = {
+ .scan = avermedia_m733a_rm_k6,
+ .size = ARRAY_SIZE(avermedia_m733a_rm_k6),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_AVERMEDIA_M733A_RM_K6,
+ }
+};
+
+static int __init init_rc_map_avermedia_m733a_rm_k6(void)
+{
+ return ir_register_map(&avermedia_m733a_rm_k6_map);
+}
+
+static void __exit exit_rc_map_avermedia_m733a_rm_k6(void)
+{
+ ir_unregister_map(&avermedia_m733a_rm_k6_map);
+}
+
+module_init(init_rc_map_avermedia_m733a_rm_k6)
+module_exit(exit_rc_map_avermedia_m733a_rm_k6)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index b762e561a6d5..bca07c0bcd01 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -594,7 +594,7 @@ static irqreturn_t dm1105_irq(int irq, void *dev_id)
int __devinit dm1105_ir_init(struct dm1105_dev *dm1105)
{
struct input_dev *input_dev;
- char *ir_codes = NULL;
+ char *ir_codes = RC_MAP_DM1105_NEC;
int err = -ENOMEM;
input_dev = input_allocate_device();
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 9ddc57909d49..425862ffb285 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
+#include <linux/smp_lock.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
@@ -963,7 +964,7 @@ dvb_demux_read(struct file *file, char __user *buf, size_t count,
return ret;
}
-static int dvb_demux_do_ioctl(struct inode *inode, struct file *file,
+static int dvb_demux_do_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dmxdev_filter *dmxdevfilter = file->private_data;
@@ -1084,10 +1085,16 @@ static int dvb_demux_do_ioctl(struct inode *inode, struct file *file,
return ret;
}
-static int dvb_demux_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long dvb_demux_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
- return dvb_usercopy(inode, file, cmd, arg, dvb_demux_do_ioctl);
+ int ret;
+
+ lock_kernel();
+ ret = dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
+ unlock_kernel();
+
+ return ret;
}
static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
@@ -1139,7 +1146,7 @@ static int dvb_demux_release(struct inode *inode, struct file *file)
static const struct file_operations dvb_demux_fops = {
.owner = THIS_MODULE,
.read = dvb_demux_read,
- .ioctl = dvb_demux_ioctl,
+ .unlocked_ioctl = dvb_demux_ioctl,
.open = dvb_demux_open,
.release = dvb_demux_release,
.poll = dvb_demux_poll,
@@ -1152,7 +1159,7 @@ static struct dvb_device dvbdev_demux = {
.fops = &dvb_demux_fops
};
-static int dvb_dvr_do_ioctl(struct inode *inode, struct file *file,
+static int dvb_dvr_do_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1176,10 +1183,16 @@ static int dvb_dvr_do_ioctl(struct inode *inode, struct file *file,
return ret;
}
-static int dvb_dvr_ioctl(struct inode *inode, struct file *file,
+static long dvb_dvr_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- return dvb_usercopy(inode, file, cmd, arg, dvb_dvr_do_ioctl);
+ int ret;
+
+ lock_kernel();
+ ret = dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
+ unlock_kernel();
+
+ return ret;
}
static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
@@ -1208,7 +1221,7 @@ static const struct file_operations dvb_dvr_fops = {
.owner = THIS_MODULE,
.read = dvb_dvr_read,
.write = dvb_dvr_write,
- .ioctl = dvb_dvr_ioctl,
+ .unlocked_ioctl = dvb_dvr_ioctl,
.open = dvb_dvr_open,
.release = dvb_dvr_release,
.poll = dvb_dvr_poll,
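
The same conversion is repeated for every DVB core device node touched by this patch (demux, DVR, CA, net, frontend, OSD): the old .ioctl handler that took an inode becomes an .unlocked_ioctl handler, and since the old entry point ran under the BKL, the new wrapper takes lock_kernel()/unlock_kernel() explicitly around dvb_usercopy(). A condensed sketch of the pattern is below; "foo" is a hypothetical device standing in for any of the converted nodes, and the file assumes it is built inside dvb-core so that "dvbdev.h" provides the new dvb_usercopy() prototype.

/* Sketch of the .ioctl -> .unlocked_ioctl conversion used throughout this patch. */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>
#include "dvbdev.h"

/* per-command worker: same role as dvb_demux_do_ioctl(), dvb_dvr_do_ioctl(), ... */
static int foo_do_ioctl(struct file *file, unsigned int cmd, void *parg)
{
	return 0; /* dispatch on cmd and fill/consume parg here */
}

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret;

	/* the old .ioctl entry point was called under the BKL, so keep that
	 * locking explicit until each handler gets finer-grained locking */
	lock_kernel();
	ret = dvb_usercopy(file, cmd, arg, foo_do_ioctl);
	unlock_kernel();

	return ret;
}

static const struct file_operations foo_fops = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = foo_ioctl,
};
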
diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
index cb22da53bfb0..ef259a0718ac 100644
--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
@@ -36,6 +36,7 @@
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
+#include <linux/smp_lock.h>
#include <linux/kthread.h>
#include "dvb_ca_en50221.h"
@@ -1181,7 +1182,7 @@ static int dvb_ca_en50221_thread(void *data)
*
* @return 0 on success, <0 on error.
*/
-static int dvb_ca_en50221_io_do_ioctl(struct inode *inode, struct file *file,
+static int dvb_ca_en50221_io_do_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1255,10 +1256,16 @@ static int dvb_ca_en50221_io_do_ioctl(struct inode *inode, struct file *file,
*
* @return 0 on success, <0 on error.
*/
-static int dvb_ca_en50221_io_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long dvb_ca_en50221_io_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
{
- return dvb_usercopy(inode, file, cmd, arg, dvb_ca_en50221_io_do_ioctl);
+ int ret;
+
+ lock_kernel();
+ ret = dvb_usercopy(file, cmd, arg, dvb_ca_en50221_io_do_ioctl);
+ unlock_kernel();
+
+ return ret;
}
@@ -1611,7 +1618,7 @@ static const struct file_operations dvb_ca_fops = {
.owner = THIS_MODULE,
.read = dvb_ca_en50221_io_read,
.write = dvb_ca_en50221_io_write,
- .ioctl = dvb_ca_en50221_io_ioctl,
+ .unlocked_ioctl = dvb_ca_en50221_io_ioctl,
.open = dvb_ca_en50221_io_open,
.release = dvb_ca_en50221_io_release,
.poll = dvb_ca_en50221_io_poll,
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 6932def4d266..44ae89ecef94 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -36,6 +36,7 @@
#include <linux/list.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
+#include <linux/smp_lock.h>
#include <linux/kthread.h>
#include <asm/processor.h>
@@ -1195,14 +1196,14 @@ static void dtv_property_cache_submit(struct dvb_frontend *fe)
}
}
-static int dvb_frontend_ioctl_legacy(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl_legacy(struct file *file,
unsigned int cmd, void *parg);
-static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl_properties(struct file *file,
unsigned int cmd, void *parg);
static int dtv_property_process_get(struct dvb_frontend *fe,
struct dtv_property *tvp,
- struct inode *inode, struct file *file)
+ struct file *file)
{
int r = 0;
@@ -1335,7 +1336,6 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
static int dtv_property_process_set(struct dvb_frontend *fe,
struct dtv_property *tvp,
- struct inode *inode,
struct file *file)
{
int r = 0;
@@ -1366,7 +1366,7 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
dprintk("%s() Finalised property cache\n", __func__);
dtv_property_cache_submit(fe);
- r |= dvb_frontend_ioctl_legacy(inode, file, FE_SET_FRONTEND,
+ r |= dvb_frontend_ioctl_legacy(file, FE_SET_FRONTEND,
&fepriv->parameters);
break;
case DTV_FREQUENCY:
@@ -1398,12 +1398,12 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
break;
case DTV_VOLTAGE:
fe->dtv_property_cache.voltage = tvp->u.data;
- r = dvb_frontend_ioctl_legacy(inode, file, FE_SET_VOLTAGE,
+ r = dvb_frontend_ioctl_legacy(file, FE_SET_VOLTAGE,
(void *)fe->dtv_property_cache.voltage);
break;
case DTV_TONE:
fe->dtv_property_cache.sectone = tvp->u.data;
- r = dvb_frontend_ioctl_legacy(inode, file, FE_SET_TONE,
+ r = dvb_frontend_ioctl_legacy(file, FE_SET_TONE,
(void *)fe->dtv_property_cache.sectone);
break;
case DTV_CODE_RATE_HP:
@@ -1487,7 +1487,7 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
return r;
}
-static int dvb_frontend_ioctl(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1509,17 +1509,17 @@ static int dvb_frontend_ioctl(struct inode *inode, struct file *file,
return -ERESTARTSYS;
if ((cmd == FE_SET_PROPERTY) || (cmd == FE_GET_PROPERTY))
- err = dvb_frontend_ioctl_properties(inode, file, cmd, parg);
+ err = dvb_frontend_ioctl_properties(file, cmd, parg);
else {
fe->dtv_property_cache.state = DTV_UNDEFINED;
- err = dvb_frontend_ioctl_legacy(inode, file, cmd, parg);
+ err = dvb_frontend_ioctl_legacy(file, cmd, parg);
}
up(&fepriv->sem);
return err;
}
-static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl_properties(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1555,7 +1555,7 @@ static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file,
}
for (i = 0; i < tvps->num; i++) {
- (tvp + i)->result = dtv_property_process_set(fe, tvp + i, inode, file);
+ (tvp + i)->result = dtv_property_process_set(fe, tvp + i, file);
err |= (tvp + i)->result;
}
@@ -1587,7 +1587,7 @@ static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file,
}
for (i = 0; i < tvps->num; i++) {
- (tvp + i)->result = dtv_property_process_get(fe, tvp + i, inode, file);
+ (tvp + i)->result = dtv_property_process_get(fe, tvp + i, file);
err |= (tvp + i)->result;
}
@@ -1604,7 +1604,7 @@ out:
return err;
}
-static int dvb_frontend_ioctl_legacy(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl_legacy(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -2031,7 +2031,7 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
static const struct file_operations dvb_frontend_fops = {
.owner = THIS_MODULE,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.poll = dvb_frontend_poll,
.open = dvb_frontend_open,
.release = dvb_frontend_release
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index cccea412088b..6c3a8a06ccab 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -59,6 +59,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dvb/net.h>
+#include <linux/smp_lock.h>
#include <linux/uio.h>
#include <asm/uaccess.h>
#include <linux/crc32.h>
@@ -350,6 +351,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
const u8 *ts, *ts_end, *from_where = NULL;
u8 ts_remain = 0, how_much = 0, new_ts = 1;
struct ethhdr *ethh = NULL;
+ bool error = false;
#ifdef ULE_DEBUG
/* The code inside ULE_DEBUG keeps a history of the last 100 TS cells processed. */
@@ -459,10 +461,16 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
/* Drop partly decoded SNDU, reset state, resync on PUSI. */
if (priv->ule_skb) {
- dev_kfree_skb( priv->ule_skb );
+ error = true;
+ dev_kfree_skb(priv->ule_skb);
+ }
+
+ if (error || priv->ule_sndu_remain) {
dev->stats.rx_errors++;
dev->stats.rx_frame_errors++;
+ error = false;
}
+
reset_ule(priv);
priv->need_pusi = 1;
continue;
@@ -534,6 +542,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
from_where += 2;
}
+ priv->ule_sndu_remain = priv->ule_sndu_len + 2;
/*
* State of current TS:
* ts_remain (remaining bytes in the current TS cell)
@@ -543,6 +552,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
*/
switch (ts_remain) {
case 1:
+ priv->ule_sndu_remain--;
priv->ule_sndu_type = from_where[0] << 8;
priv->ule_sndu_type_1 = 1; /* first byte of ule_type is set. */
ts_remain -= 1; from_where += 1;
@@ -556,6 +566,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
default: /* complete ULE header is present in current TS. */
/* Extract ULE type field. */
if (priv->ule_sndu_type_1) {
+ priv->ule_sndu_type_1 = 0;
priv->ule_sndu_type |= from_where[0];
from_where += 1; /* points to payload start. */
ts_remain -= 1;
@@ -1329,7 +1340,7 @@ static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
return 0;
}
-static int dvb_net_do_ioctl(struct inode *inode, struct file *file,
+static int dvb_net_do_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1431,10 +1442,16 @@ static int dvb_net_do_ioctl(struct inode *inode, struct file *file,
return 0;
}
-static int dvb_net_ioctl(struct inode *inode, struct file *file,
+static long dvb_net_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- return dvb_usercopy(inode, file, cmd, arg, dvb_net_do_ioctl);
+ int ret;
+
+ lock_kernel();
+ ret = dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl);
+ unlock_kernel();
+
+ return ret;
}
static int dvb_net_close(struct inode *inode, struct file *file)
@@ -1455,7 +1472,7 @@ static int dvb_net_close(struct inode *inode, struct file *file)
static const struct file_operations dvb_net_fops = {
.owner = THIS_MODULE,
- .ioctl = dvb_net_ioctl,
+ .unlocked_ioctl = dvb_net_ioctl,
.open = dvb_generic_open,
.release = dvb_net_close,
};
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index 94159b90f733..b915c39d782f 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -154,10 +154,11 @@ int dvb_generic_release(struct inode *inode, struct file *file)
EXPORT_SYMBOL(dvb_generic_release);
-int dvb_generic_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+long dvb_generic_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
{
struct dvb_device *dvbdev = file->private_data;
+ int ret;
if (!dvbdev)
return -ENODEV;
@@ -165,7 +166,11 @@ int dvb_generic_ioctl(struct inode *inode, struct file *file,
if (!dvbdev->kernel_ioctl)
return -EINVAL;
- return dvb_usercopy (inode, file, cmd, arg, dvbdev->kernel_ioctl);
+ lock_kernel();
+ ret = dvb_usercopy(file, cmd, arg, dvbdev->kernel_ioctl);
+ unlock_kernel();
+
+ return ret;
}
EXPORT_SYMBOL(dvb_generic_ioctl);
@@ -377,9 +382,9 @@ EXPORT_SYMBOL(dvb_unregister_adapter);
define this as video_usercopy(). this will introduce a dependecy
to the v4l "videodev.o" module, which is unnecessary for some
cards (ie. the budget dvb-cards don't need the v4l module...) */
-int dvb_usercopy(struct inode *inode, struct file *file,
+int dvb_usercopy(struct file *file,
unsigned int cmd, unsigned long arg,
- int (*func)(struct inode *inode, struct file *file,
+ int (*func)(struct file *file,
unsigned int cmd, void *arg))
{
char sbuf[128];
@@ -416,7 +421,7 @@ int dvb_usercopy(struct inode *inode, struct file *file,
}
/* call driver */
- if ((err = func(inode, file, cmd, parg)) == -ENOIOCTLCMD)
+ if ((err = func(file, cmd, parg)) == -ENOIOCTLCMD)
err = -EINVAL;
if (err < 0)
diff --git a/drivers/media/dvb/dvb-core/dvbdev.h b/drivers/media/dvb/dvb-core/dvbdev.h
index f7b499d4a3c0..fcc6ae98745e 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.h
+++ b/drivers/media/dvb/dvb-core/dvbdev.h
@@ -116,8 +116,7 @@ struct dvb_device {
wait_queue_head_t wait_queue;
/* don't really need those !? -- FIXME: use video_usercopy */
- int (*kernel_ioctl)(struct inode *inode, struct file *file,
- unsigned int cmd, void *arg);
+ int (*kernel_ioctl)(struct file *file, unsigned int cmd, void *arg);
void *priv;
};
@@ -138,17 +137,15 @@ extern void dvb_unregister_device (struct dvb_device *dvbdev);
extern int dvb_generic_open (struct inode *inode, struct file *file);
extern int dvb_generic_release (struct inode *inode, struct file *file);
-extern int dvb_generic_ioctl (struct inode *inode, struct file *file,
+extern long dvb_generic_ioctl (struct file *file,
unsigned int cmd, unsigned long arg);
/* we don't mess with video_usercopy() any more,
we simply define out own dvb_usercopy(), which will hopefully become
generic_usercopy() someday... */
-extern int dvb_usercopy(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg,
- int (*func)(struct inode *inode, struct file *file,
- unsigned int cmd, void *arg));
+extern int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
+ int (*func)(struct file *file, unsigned int cmd, void *arg));
/** generic DVB attach function. */
#ifdef CONFIG_MEDIA_ATTACH
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index e5f91f16ffa4..553b48ac1919 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -76,6 +76,7 @@ config DVB_USB_DIB0700
select DVB_S5H1411 if !DVB_FE_CUSTOMISE
select DVB_LGDT3305 if !DVB_FE_CUSTOMISE
select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE
+ select DVB_TUNER_DIB0090 if !DVB_FE_CUSTOMISE
select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MT2266 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE
@@ -134,6 +135,7 @@ config DVB_USB_M920X
select DVB_TDA1004X if !DVB_FE_CUSTOMISE
select MEDIA_TUNER_QT1010 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_TDA827X if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMISE
help
Say Y here to support the MSI Mega Sky 580 USB2.0 DVB-T receiver.
Currently, only devices with a product id of
@@ -264,7 +266,7 @@ config DVB_USB_DW2102
select DVB_STB6000 if !DVB_FE_CUSTOMISE
select DVB_CX24116 if !DVB_FE_CUSTOMISE
select DVB_SI21XX if !DVB_FE_CUSTOMISE
- select DVB_TDA10021 if !DVB_FE_CUSTOMISE
+ select DVB_TDA10023 if !DVB_FE_CUSTOMISE
select DVB_MT312 if !DVB_FE_CUSTOMISE
select DVB_ZL10039 if !DVB_FE_CUSTOMISE
select DVB_DS3000 if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index 0eb490889162..11e9e85dac86 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -1026,8 +1026,10 @@ static int cxusb_dualdig4_rev2_frontend_attach(struct dvb_usb_adapter *adap)
cxusb_bluebird_gpio_pulse(adap->dev, 0x02, 1);
if (dib7000p_i2c_enumeration(&adap->dev->i2c_adap, 1, 18,
- &cxusb_dualdig4_rev2_config) < 0)
+ &cxusb_dualdig4_rev2_config) < 0) {
+ printk(KERN_WARNING "Unable to enumerate dib7000p\n");
return -ENODEV;
+ }
adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80,
&cxusb_dualdig4_rev2_config);
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 085c4e457e0e..b4afe6f8ed19 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -198,6 +198,7 @@
#define USB_PID_AVERMEDIA_A850 0x850a
#define USB_PID_AVERMEDIA_A805 0xa805
#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
+#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081
#define USB_PID_TERRATEC_CINERGY_HT_USB_XE 0x0058
diff --git a/drivers/media/dvb/dvb-usb/ttusb2.c b/drivers/media/dvb/dvb-usb/ttusb2.c
index 20ca9d9ee99b..a6de489a6a39 100644
--- a/drivers/media/dvb/dvb-usb/ttusb2.c
+++ b/drivers/media/dvb/dvb-usb/ttusb2.c
@@ -29,6 +29,8 @@
#include "tda826x.h"
#include "tda10086.h"
+#include "tda1002x.h"
+#include "tda827x.h"
#include "lnbp21.h"
/* debug */
@@ -150,7 +152,17 @@ static struct tda10086_config tda10086_config = {
.xtal_freq = TDA10086_XTAL_16M,
};
-static int ttusb2_frontend_attach(struct dvb_usb_adapter *adap)
+static struct tda10023_config tda10023_config = {
+ .demod_address = 0x0c,
+ .invert = 0,
+ .xtal = 16000000,
+ .pll_m = 11,
+ .pll_p = 3,
+ .pll_n = 1,
+ .deltaf = 0xa511,
+};
+
+static int ttusb2_frontend_tda10086_attach(struct dvb_usb_adapter *adap)
{
if (usb_set_interface(adap->dev->udev,0,3) < 0)
err("set interface to alts=3 failed");
@@ -163,7 +175,27 @@ static int ttusb2_frontend_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static int ttusb2_tuner_attach(struct dvb_usb_adapter *adap)
+static int ttusb2_frontend_tda10023_attach(struct dvb_usb_adapter *adap)
+{
+ if (usb_set_interface(adap->dev->udev, 0, 3) < 0)
+ err("set interface to alts=3 failed");
+ if ((adap->fe = dvb_attach(tda10023_attach, &tda10023_config, &adap->dev->i2c_adap, 0x48)) == NULL) {
+ deb_info("TDA10023 attach failed\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int ttusb2_tuner_tda827x_attach(struct dvb_usb_adapter *adap)
+{
+ if (dvb_attach(tda827x_attach, adap->fe, 0x61, &adap->dev->i2c_adap, NULL) == NULL) {
+ printk(KERN_ERR "%s: No tda827x found!\n", __func__);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int ttusb2_tuner_tda826x_attach(struct dvb_usb_adapter *adap)
{
if (dvb_attach(tda826x_attach, adap->fe, 0x60, &adap->dev->i2c_adap, 0) == NULL) {
deb_info("TDA8263 attach failed\n");
@@ -180,6 +212,7 @@ static int ttusb2_tuner_attach(struct dvb_usb_adapter *adap)
/* DVB USB Driver stuff */
static struct dvb_usb_device_properties ttusb2_properties;
static struct dvb_usb_device_properties ttusb2_properties_s2400;
+static struct dvb_usb_device_properties ttusb2_properties_ct3650;
static int ttusb2_probe(struct usb_interface *intf,
const struct usb_device_id *id)
@@ -187,6 +220,8 @@ static int ttusb2_probe(struct usb_interface *intf,
if (0 == dvb_usb_device_init(intf, &ttusb2_properties,
THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &ttusb2_properties_s2400,
+ THIS_MODULE, NULL, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf, &ttusb2_properties_ct3650,
THIS_MODULE, NULL, adapter_nr))
return 0;
return -ENODEV;
@@ -197,6 +232,8 @@ static struct usb_device_id ttusb2_table [] = {
{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PCTV_450E) },
{ USB_DEVICE(USB_VID_TECHNOTREND,
USB_PID_TECHNOTREND_CONNECT_S2400) },
+ { USB_DEVICE(USB_VID_TECHNOTREND,
+ USB_PID_TECHNOTREND_CONNECT_CT3650) },
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, ttusb2_table);
@@ -214,8 +251,8 @@ static struct dvb_usb_device_properties ttusb2_properties = {
{
.streaming_ctrl = NULL, // ttusb2_streaming_ctrl,
- .frontend_attach = ttusb2_frontend_attach,
- .tuner_attach = ttusb2_tuner_attach,
+ .frontend_attach = ttusb2_frontend_tda10086_attach,
+ .tuner_attach = ttusb2_tuner_tda826x_attach,
/* parameter for the MPEG2-data transfer */
.stream = {
@@ -266,8 +303,8 @@ static struct dvb_usb_device_properties ttusb2_properties_s2400 = {
{
.streaming_ctrl = NULL,
- .frontend_attach = ttusb2_frontend_attach,
- .tuner_attach = ttusb2_tuner_attach,
+ .frontend_attach = ttusb2_frontend_tda10086_attach,
+ .tuner_attach = ttusb2_tuner_tda826x_attach,
/* parameter for the MPEG2-data transfer */
.stream = {
@@ -301,6 +338,52 @@ static struct dvb_usb_device_properties ttusb2_properties_s2400 = {
}
};
+static struct dvb_usb_device_properties ttusb2_properties_ct3650 = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+
+ .usb_ctrl = CYPRESS_FX2,
+
+ .size_of_priv = sizeof(struct ttusb2_state),
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .streaming_ctrl = NULL,
+
+ .frontend_attach = ttusb2_frontend_tda10023_attach,
+ .tuner_attach = ttusb2_tuner_tda827x_attach,
+
+ /* parameter for the MPEG2-data transfer */
+ .stream = {
+ .type = USB_ISOC,
+ .count = 5,
+ .endpoint = 0x02,
+ .u = {
+ .isoc = {
+ .framesperurb = 4,
+ .framesize = 940,
+ .interval = 1,
+ }
+ }
+ }
+ },
+ },
+
+ .power_ctrl = ttusb2_power_ctrl,
+ .identify_state = ttusb2_identify_state,
+
+ .i2c_algo = &ttusb2_i2c_algo,
+
+ .generic_bulk_ctrl_endpoint = 0x01,
+
+ .num_device_descs = 1,
+ .devices = {
+ { "Technotrend TT-connect CT-3650",
+ .warm_ids = { &ttusb2_table[3], NULL },
+ },
+ }
+};
+
static struct usb_driver ttusb2_driver = {
.name = "dvb_usb_ttusb2",
.probe = ttusb2_probe,
diff --git a/drivers/media/dvb/firewire/firedtv-1394.c b/drivers/media/dvb/firewire/firedtv-1394.c
index 26333b4f4d3e..b34ca7afb0e6 100644
--- a/drivers/media/dvb/firewire/firedtv-1394.c
+++ b/drivers/media/dvb/firewire/firedtv-1394.c
@@ -58,7 +58,7 @@ static void rawiso_activity_cb(struct hpsb_iso *iso)
num = hpsb_iso_n_ready(iso);
if (!fdtv) {
- dev_err(fdtv->device, "received at unknown iso channel\n");
+ pr_err("received at unknown iso channel\n");
goto out;
}
diff --git a/drivers/media/dvb/firewire/firedtv-ci.c b/drivers/media/dvb/firewire/firedtv-ci.c
index 853e04b7cb36..d3c2cf60de76 100644
--- a/drivers/media/dvb/firewire/firedtv-ci.c
+++ b/drivers/media/dvb/firewire/firedtv-ci.c
@@ -175,8 +175,7 @@ static int fdtv_ca_send_msg(struct firedtv *fdtv, void *arg)
return err;
}
-static int fdtv_ca_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, void *arg)
+static int fdtv_ca_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct dvb_device *dvbdev = file->private_data;
struct firedtv *fdtv = dvbdev->priv;
@@ -217,7 +216,7 @@ static unsigned int fdtv_ca_io_poll(struct file *file, poll_table *wait)
static const struct file_operations fdtv_ca_fops = {
.owner = THIS_MODULE,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.open = dvb_generic_open,
.release = dvb_generic_release,
.poll = fdtv_ca_io_poll,
diff --git a/drivers/media/dvb/frontends/au8522_decoder.c b/drivers/media/dvb/frontends/au8522_decoder.c
index 68dba3a4b4da..29cdbfe36852 100644
--- a/drivers/media/dvb/frontends/au8522_decoder.c
+++ b/drivers/media/dvb/frontends/au8522_decoder.c
@@ -567,30 +567,6 @@ static int au8522_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
/* ----------------------------------------------------------------------- */
-static int au8522_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
-{
- switch (fmt->type) {
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int au8522_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
-{
- switch (fmt->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- /* Not yet implemented */
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* ----------------------------------------------------------------------- */
-
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int au8522_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
@@ -772,8 +748,6 @@ static const struct v4l2_subdev_audio_ops au8522_audio_ops = {
static const struct v4l2_subdev_video_ops au8522_video_ops = {
.s_routing = au8522_s_video_routing,
- .g_fmt = au8522_g_fmt,
- .s_fmt = au8522_s_fmt,
.s_stream = au8522_s_stream,
};
diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
index 78001e8bcdb7..fc61d9230db8 100644
--- a/drivers/media/dvb/frontends/ds3000.c
+++ b/drivers/media/dvb/frontends/ds3000.c
@@ -969,15 +969,12 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
dprintk("%s\n", __func__);
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct ds3000_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct ds3000_state), GFP_KERNEL);
if (state == NULL) {
printk(KERN_ERR "Unable to kmalloc\n");
goto error2;
}
- /* setup the state */
- memset(state, 0, sizeof(struct ds3000_state));
-
state->config = config;
state->i2c = i2c;
state->prevUCBS2 = 0;
diff --git a/drivers/media/dvb/frontends/stv6110x.c b/drivers/media/dvb/frontends/stv6110x.c
index 42591ce1aaad..f36cab12bdc7 100644
--- a/drivers/media/dvb/frontends/stv6110x.c
+++ b/drivers/media/dvb/frontends/stv6110x.c
@@ -303,7 +303,10 @@ static int stv6110x_set_mode(struct dvb_frontend *fe, enum tuner_mode mode)
static int stv6110x_sleep(struct dvb_frontend *fe)
{
- return stv6110x_set_mode(fe, TUNER_SLEEP);
+ if (fe->tuner_priv)
+ return stv6110x_set_mode(fe, TUNER_SLEEP);
+
+ return 0;
}
static int stv6110x_get_status(struct dvb_frontend *fe, u32 *status)
diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
index 692c3e226e83..4692a41ad95b 100644
--- a/drivers/media/dvb/ngene/ngene-cards.c
+++ b/drivers/media/dvb/ngene/ngene-cards.c
@@ -217,6 +217,19 @@ static struct ngene_info ngene_info_cineS2v5 = {
.fw_version = 15,
};
+static struct ngene_info ngene_info_duoFlexS2 = {
+ .type = NGENE_SIDEWINDER,
+ .name = "Digital Devices DuoFlex S2 miniPCIe",
+ .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
+ .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
+ .fe_config = {&fe_cineS2, &fe_cineS2},
+ .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
+ .lnb = {0x0a, 0x08},
+ .tsf = {3, 3},
+ .fw_version = 15,
+};
+
static struct ngene_info ngene_info_m780 = {
.type = NGENE_APP,
.name = "Aver M780 ATSC/QAM-B",
@@ -256,6 +269,8 @@ static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
NGENE_ID(0x18c3, 0xdb02, ngene_info_satixS2v2),
NGENE_ID(0x18c3, 0xdd00, ngene_info_cineS2v5),
+ NGENE_ID(0x18c3, 0xdd10, ngene_info_duoFlexS2),
+ NGENE_ID(0x18c3, 0xdd20, ngene_info_duoFlexS2),
NGENE_ID(0x1461, 0x062e, ngene_info_m780),
{0}
};
diff --git a/drivers/media/dvb/ngene/ngene-core.c b/drivers/media/dvb/ngene/ngene-core.c
index c8b4dfa0ab5f..4caeb163a666 100644
--- a/drivers/media/dvb/ngene/ngene-core.c
+++ b/drivers/media/dvb/ngene/ngene-core.c
@@ -53,8 +53,6 @@ MODULE_PARM_DESC(debug, "Print debugging information.");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-#define COMMAND_TIMEOUT_WORKAROUND
-
#define dprintk if (debug) printk
#define ngwriteb(dat, adr) writeb((dat), (char *)(dev->iomem + (adr)))
@@ -147,24 +145,24 @@ static void demux_tasklet(unsigned long data)
} else {
if (chan->HWState == HWSTATE_RUN) {
u32 Flags = 0;
+ IBufferExchange *exch1 = chan->pBufferExchange;
+ IBufferExchange *exch2 = chan->pBufferExchange2;
if (Cur->ngeneBuffer.SR.Flags & 0x01)
Flags |= BEF_EVEN_FIELD;
if (Cur->ngeneBuffer.SR.Flags & 0x20)
Flags |= BEF_OVERFLOW;
- if (chan->pBufferExchange)
- chan->pBufferExchange(chan,
- Cur->Buffer1,
- chan->
- Capture1Length,
- Cur->ngeneBuffer.
- SR.Clock, Flags);
- if (chan->pBufferExchange2)
- chan->pBufferExchange2(chan,
- Cur->Buffer2,
- chan->
- Capture2Length,
- Cur->ngeneBuffer.
- SR.Clock, Flags);
+ spin_unlock_irq(&chan->state_lock);
+ if (exch1)
+ exch1(chan, Cur->Buffer1,
+ chan->Capture1Length,
+ Cur->ngeneBuffer.SR.Clock,
+ Flags);
+ if (exch2)
+ exch2(chan, Cur->Buffer2,
+ chan->Capture2Length,
+ Cur->ngeneBuffer.SR.Clock,
+ Flags);
+ spin_lock_irq(&chan->state_lock);
} else if (chan->HWState != HWSTATE_STOP)
chan->HWState = HWSTATE_RUN;
}
@@ -572,11 +570,7 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream,
u16 BsSPI = ((stream & 1) ? 0x9800 : 0x9700);
u16 BsSDO = 0x9B00;
- /* down(&dev->stream_mutex); */
- while (down_trylock(&dev->stream_mutex)) {
- printk(KERN_INFO DEVICE_NAME ": SC locked\n");
- msleep(1);
- }
+ down(&dev->stream_mutex);
memset(&com, 0, sizeof(com));
com.cmd.hdr.Opcode = CMD_CONTROL;
com.cmd.hdr.Length = sizeof(struct FW_STREAM_CONTROL) - 2;
@@ -1252,14 +1246,17 @@ static int ngene_load_firm(struct ngene *dev)
version = 15;
size = 23466;
fw_name = "ngene_15.fw";
+ dev->cmd_timeout_workaround = true;
break;
case 16:
size = 23498;
fw_name = "ngene_16.fw";
+ dev->cmd_timeout_workaround = true;
break;
case 17:
size = 24446;
fw_name = "ngene_17.fw";
+ dev->cmd_timeout_workaround = true;
break;
}
@@ -1299,11 +1296,16 @@ static void ngene_stop(struct ngene *dev)
ngwritel(0, NGENE_EVENT);
ngwritel(0, NGENE_EVENT_HI);
free_irq(dev->pci_dev->irq, dev);
+#ifdef CONFIG_PCI_MSI
+ if (dev->msi_enabled)
+ pci_disable_msi(dev->pci_dev);
+#endif
}
static int ngene_start(struct ngene *dev)
{
int stat;
+ unsigned long flags;
int i;
pci_set_master(dev->pci_dev);
@@ -1333,6 +1335,28 @@ static int ngene_start(struct ngene *dev)
if (stat < 0)
goto fail;
+#ifdef CONFIG_PCI_MSI
+ /* enable MSI if kernel and card support it */
+ if (pci_msi_enabled() && dev->card_info->msi_supported) {
+ ngwritel(0, NGENE_INT_ENABLE);
+ free_irq(dev->pci_dev->irq, dev);
+ stat = pci_enable_msi(dev->pci_dev);
+ if (stat) {
+ printk(KERN_INFO DEVICE_NAME
+ ": MSI not available\n");
+ flags = IRQF_SHARED;
+ } else {
+ flags = 0;
+ dev->msi_enabled = true;
+ }
+ stat = request_irq(dev->pci_dev->irq, irq_handler,
+ flags, "nGene", dev);
+ if (stat < 0)
+ goto fail2;
+ ngwritel(1, NGENE_INT_ENABLE);
+ }
+#endif
+
stat = ngene_i2c_init(dev, 0);
if (stat < 0)
goto fail;
@@ -1358,10 +1382,18 @@ static int ngene_start(struct ngene *dev)
bconf = BUFFER_CONFIG_3333;
stat = ngene_command_config_buf(dev, bconf);
}
- return stat;
+ if (!stat)
+ return stat;
+
+ /* otherwise error: fall through */
fail:
ngwritel(0, NGENE_INT_ENABLE);
free_irq(dev->pci_dev->irq, dev);
+#ifdef CONFIG_PCI_MSI
+fail2:
+ if (dev->msi_enabled)
+ pci_disable_msi(dev->pci_dev);
+#endif
return stat;
}
@@ -1379,10 +1411,8 @@ static void release_channel(struct ngene_channel *chan)
struct ngene_info *ni = dev->card_info;
int io = ni->io_type[chan->number];
-#ifdef COMMAND_TIMEOUT_WORKAROUND
- if (chan->running)
+ if (chan->dev->cmd_timeout_workaround && chan->running)
set_transfer(chan, 0);
-#endif
tasklet_kill(&chan->demux_tasklet);
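
In ngene_start() above, MSI is only attempted when both the kernel (pci_msi_enabled()) and the card (the new msi_supported flag) allow it, and the driver falls back to the legacy shared interrupt when pci_enable_msi() fails; ngene_stop() mirrors the teardown. The sketch below shows that enable-with-fallback pairing in isolation, for a generic PCI driver rather than ngene itself; it simplifies the real sequence, which first registers the legacy IRQ and re-requests it after switching to MSI.

/* Sketch only: MSI enable with fallback to a legacy shared interrupt. */
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t sketch_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int sketch_setup_irq(struct pci_dev *pdev, bool card_supports_msi,
			    bool *msi_enabled, void *dev)
{
	unsigned long flags = IRQF_SHARED;   /* legacy INTx must be shareable */

	*msi_enabled = false;

#ifdef CONFIG_PCI_MSI
	if (pci_msi_enabled() && card_supports_msi && !pci_enable_msi(pdev)) {
		*msi_enabled = true;
		flags = 0;                   /* MSI vectors are exclusive */
	}
#endif
	return request_irq(pdev->irq, sketch_irq, flags, "sketch", dev);
}

static void sketch_teardown_irq(struct pci_dev *pdev, bool msi_enabled, void *dev)
{
	free_irq(pdev->irq, dev);
#ifdef CONFIG_PCI_MSI
	if (msi_enabled)
		pci_disable_msi(pdev);       /* must follow free_irq() */
#endif
}
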
diff --git a/drivers/media/dvb/ngene/ngene-dvb.c b/drivers/media/dvb/ngene/ngene-dvb.c
index 96013eb353cd..48f980b21d66 100644
--- a/drivers/media/dvb/ngene/ngene-dvb.c
+++ b/drivers/media/dvb/ngene/ngene-dvb.c
@@ -37,15 +37,12 @@
#include <linux/pci.h>
#include <linux/smp_lock.h>
#include <linux/timer.h>
-#include <linux/version.h>
#include <linux/byteorder/generic.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include "ngene.h"
-#define COMMAND_TIMEOUT_WORKAROUND
-
/****************************************************************************/
/* COMMAND API interface ****************************************************/
@@ -69,9 +66,7 @@ void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
struct ngene_channel *chan = priv;
-#ifdef COMMAND_TIMEOUT_WORKAROUND
if (chan->users > 0)
-#endif
dvb_dmx_swfilter(&chan->demux, buf, len);
return NULL;
}
@@ -106,11 +101,8 @@ int ngene_start_feed(struct dvb_demux_feed *dvbdmxfeed)
struct ngene_channel *chan = dvbdmx->priv;
if (chan->users == 0) {
-#ifdef COMMAND_TIMEOUT_WORKAROUND
- if (!chan->running)
-#endif
+ if (!chan->dev->cmd_timeout_workaround || !chan->running)
set_transfer(chan, 1);
- /* msleep(10); */
}
return ++chan->users;
@@ -124,9 +116,8 @@ int ngene_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
if (--chan->users)
return chan->users;
-#ifndef COMMAND_TIMEOUT_WORKAROUND
- set_transfer(chan, 0);
-#endif
+ if (!chan->dev->cmd_timeout_workaround)
+ set_transfer(chan, 0);
return 0;
}
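The start_feed/stop_feed changes above replace the compile-time COMMAND_TIMEOUT_WORKAROUND #ifdefs with a per-device flag and keep a user count per channel, so the hardware transfer is armed only for the first feed and torn down only after the last one. A rough sketch of that refcounting, with struct chan and chan_start_io()/chan_stop_io() standing in for struct ngene_channel and set_transfer():

struct chan {
        int users;
        bool running;
        bool cmd_timeout_workaround;
};

static void chan_start_io(struct chan *chan) { /* arm the transfer */ }
static void chan_stop_io(struct chan *chan)  { /* stop the transfer */ }

static int start_feed(struct chan *chan)
{
        if (chan->users == 0) {
                /* With the firmware workaround the stream may still be
                 * running from an earlier user; only arm it if it is not. */
                if (!chan->cmd_timeout_workaround || !chan->running)
                        chan_start_io(chan);
        }
        return ++chan->users;
}

static int stop_feed(struct chan *chan)
{
        if (--chan->users)
                return chan->users;     /* other feeds still active */
        /* Affected firmware keeps the stream running until release time. */
        if (!chan->cmd_timeout_workaround)
                chan_stop_io(chan);
        return 0;
}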
diff --git a/drivers/media/dvb/ngene/ngene-i2c.c b/drivers/media/dvb/ngene/ngene-i2c.c
index 2ef54ca6badd..477fe0aade86 100644
--- a/drivers/media/dvb/ngene/ngene-i2c.c
+++ b/drivers/media/dvb/ngene/ngene-i2c.c
@@ -39,7 +39,6 @@
#include <linux/pci_ids.h>
#include <linux/smp_lock.h>
#include <linux/timer.h>
-#include <linux/version.h>
#include <linux/byteorder/generic.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
diff --git a/drivers/media/dvb/ngene/ngene.h b/drivers/media/dvb/ngene/ngene.h
index 676fcbb79026..8fb4200f83f8 100644
--- a/drivers/media/dvb/ngene/ngene.h
+++ b/drivers/media/dvb/ngene/ngene.h
@@ -725,6 +725,8 @@ struct ngene {
u32 device_version;
u32 fw_interface_version;
u32 icounts;
+ bool msi_enabled;
+ bool cmd_timeout_workaround;
u8 *CmdDoneByte;
int BootFirmware;
@@ -797,6 +799,7 @@ struct ngene_info {
#define NGENE_VBOX_V2 7
int fw_version;
+ bool msi_supported;
char *name;
int io_type[MAX_STREAM];
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
index d8d4214fd65f..32a7ec65ec42 100644
--- a/drivers/media/dvb/ttpci/Kconfig
+++ b/drivers/media/dvb/ttpci/Kconfig
@@ -68,13 +68,14 @@ config DVB_BUDGET
select DVB_VES1820 if !DVB_FE_CUSTOMISE
select DVB_L64781 if !DVB_FE_CUSTOMISE
select DVB_TDA8083 if !DVB_FE_CUSTOMISE
- select DVB_TDA10021 if !DVB_FE_CUSTOMISE
- select DVB_TDA10023 if !DVB_FE_CUSTOMISE
select DVB_S5H1420 if !DVB_FE_CUSTOMISE
select DVB_TDA10086 if !DVB_FE_CUSTOMISE
select DVB_TDA826X if !DVB_FE_CUSTOMISE
select DVB_LNBP21 if !DVB_FE_CUSTOMISE
select DVB_TDA1004X if !DVB_FE_CUSTOMISE
+ select DVB_ISL6423 if !DVB_FE_CUSTOMISE
+ select DVB_STV090x if !DVB_FE_CUSTOMISE
+ select DVB_STV6110x if !DVB_FE_CUSTOMISE
help
Support for simple SAA7146 based DVB cards (so called Budget-
or Nova-PCI cards) without onboard MPEG2 decoder, and without
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index 38915591c6e5..a6be529eec5c 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -708,7 +708,7 @@ static void gpioirq(unsigned long cookie)
#ifdef CONFIG_DVB_AV7110_OSD
-static int dvb_osd_ioctl(struct inode *inode, struct file *file,
+static int dvb_osd_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -727,7 +727,7 @@ static int dvb_osd_ioctl(struct inode *inode, struct file *file,
static const struct file_operations dvb_osd_fops = {
.owner = THIS_MODULE,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.open = dvb_generic_open,
.release = dvb_generic_release,
};
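The av7110 hunk above (and the av7110_av.c/av7110_ca.c hunks that follow) move the char-device entry points from .ioctl to .unlocked_ioctl, so the handler loses its struct inode * argument, returns long, and is no longer called under the big kernel lock. A generic sketch of that conversion with driver-local locking; my_ioctl, my_mutex and my_fops are illustrative names, not the dvb-core functions:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_mutex);

static long my_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        long ret = -ENOTTY;

        mutex_lock(&my_mutex);          /* replaces the implicit BKL */
        switch (cmd) {
        /* ... per-command handling, copy_from_user()/copy_to_user() ... */
        default:
                break;
        }
        mutex_unlock(&my_mutex);
        return ret;
}

static const struct file_operations my_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = my_ioctl,     /* was: .ioctl, with an inode arg */
};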
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index 53884814161c..13efba942dac 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -1089,7 +1089,7 @@ static int play_iframe(struct av7110 *av7110, char __user *buf, unsigned int len
}
-static int dvb_video_ioctl(struct inode *inode, struct file *file,
+static int dvb_video_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1297,7 +1297,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
return ret;
}
-static int dvb_audio_ioctl(struct inode *inode, struct file *file,
+static int dvb_audio_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1517,7 +1517,7 @@ static int dvb_audio_release(struct inode *inode, struct file *file)
static const struct file_operations dvb_video_fops = {
.owner = THIS_MODULE,
.write = dvb_video_write,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.open = dvb_video_open,
.release = dvb_video_release,
.poll = dvb_video_poll,
@@ -1535,7 +1535,7 @@ static struct dvb_device dvbdev_video = {
static const struct file_operations dvb_audio_fops = {
.owner = THIS_MODULE,
.write = dvb_audio_write,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.open = dvb_audio_open,
.release = dvb_audio_release,
.poll = dvb_audio_poll,
diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c
index ac7779c45c5b..4eba35a018e3 100644
--- a/drivers/media/dvb/ttpci/av7110_ca.c
+++ b/drivers/media/dvb/ttpci/av7110_ca.c
@@ -248,8 +248,7 @@ static unsigned int dvb_ca_poll (struct file *file, poll_table *wait)
return mask;
}
-static int dvb_ca_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, void *parg)
+static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
@@ -350,7 +349,7 @@ static const struct file_operations dvb_ca_fops = {
.owner = THIS_MODULE,
.read = dvb_ca_read,
.write = dvb_ca_write,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.open = dvb_ca_open,
.release = dvb_generic_release,
.poll = dvb_ca_poll,
diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c
index 461714396331..13ac9e3ab121 100644
--- a/drivers/media/dvb/ttpci/budget-ci.c
+++ b/drivers/media/dvb/ttpci/budget-ci.c
@@ -215,6 +215,7 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
break;
case 0x1010:
case 0x1017:
+ case 0x1019:
case 0x101a:
/* for the Technotrend 1500 bundled remote */
ir_codes = RC_MAP_TT_1500;
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index a5844d08d8b7..67a4ec8768a6 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -482,7 +482,6 @@ static __devexit int si470x_i2c_remove(struct i2c_client *client)
cancel_work_sync(&radio->radio_work);
video_unregister_device(radio->videodev);
kfree(radio);
- i2c_set_clientdata(client, NULL);
return 0;
}
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index ad9e6f9c22e9..bdbc9d305419 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -646,7 +646,7 @@ config VIDEO_PMS
config VIDEO_BWQCAM
tristate "Quickcam BW Video For Linux"
- depends on PARPORT && VIDEO_V4L1
+ depends on PARPORT && VIDEO_V4L2
help
Say Y if you have the black and white version of the QuickCam
camera. See the next option for the color version.
@@ -656,7 +656,7 @@ config VIDEO_BWQCAM
config VIDEO_CQCAM
tristate "QuickCam Colour Video For Linux (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PARPORT && VIDEO_V4L1
+ depends on EXPERIMENTAL && PARPORT && VIDEO_V4L2
help
This is the video4linux driver for the colour version of the
Connectix QuickCam. If you have one of these cameras, say Y here,
diff --git a/drivers/media/video/ak881x.c b/drivers/media/video/ak881x.c
index 35390d4717b9..1573392f74bd 100644
--- a/drivers/media/video/ak881x.c
+++ b/drivers/media/video/ak881x.c
@@ -11,6 +11,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/ak881x.h>
@@ -141,7 +142,7 @@ static int ak881x_s_mbus_fmt(struct v4l2_subdev *sd,
return ak881x_try_g_mbus_fmt(sd, mf);
}
-static int ak881x_enum_mbus_fmt(struct v4l2_subdev *sd, int index,
+static int ak881x_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index,
enum v4l2_mbus_pixelcode *code)
{
if (index)
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index 3c9e754d73a0..935e0c9a9674 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -66,19 +66,58 @@ OTHER DEALINGS IN THE SOFTWARE.
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/parport.h>
#include <linux/sched.h>
-#include <linux/videodev.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ioctl.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
-
-#include "bw-qcam.h"
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+
+/* One from column A... */
+#define QC_NOTSET 0
+#define QC_UNIDIR 1
+#define QC_BIDIR 2
+#define QC_SERIAL 3
+
+/* ... and one from column B */
+#define QC_ANY 0x00
+#define QC_FORCE_UNIDIR 0x10
+#define QC_FORCE_BIDIR 0x20
+#define QC_FORCE_SERIAL 0x30
+/* in the port_mode member */
+
+#define QC_MODE_MASK 0x07
+#define QC_FORCE_MASK 0x70
+
+#define MAX_HEIGHT 243
+#define MAX_WIDTH 336
+
+/* Bit fields for status flags */
+#define QC_PARAM_CHANGE 0x01 /* Camera status change has occurred */
+
+struct qcam {
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct pardevice *pdev;
+ struct parport *pport;
+ struct mutex lock;
+ int width, height;
+ int bpp;
+ int mode;
+ int contrast, brightness, whitebal;
+ int port_mode;
+ int transfer_scale;
+ int top, left;
+ int status;
+ unsigned int saved_bits;
+ unsigned long in_use;
+};
static unsigned int maxpoll = 250; /* Maximum busy-loop count for qcam I/O */
static unsigned int yieldlines = 4; /* Yield after this many during capture */
@@ -93,22 +132,26 @@ module_param(video_nr, int, 0);
* immediately attempt to initialize qcam */
module_param(force_init, int, 0);
-static inline int read_lpstatus(struct qcam_device *q)
+#define MAX_CAMS 4
+static struct qcam *qcams[MAX_CAMS];
+static unsigned int num_cams;
+
+static inline int read_lpstatus(struct qcam *q)
{
return parport_read_status(q->pport);
}
-static inline int read_lpdata(struct qcam_device *q)
+static inline int read_lpdata(struct qcam *q)
{
return parport_read_data(q->pport);
}
-static inline void write_lpdata(struct qcam_device *q, int d)
+static inline void write_lpdata(struct qcam *q, int d)
{
parport_write_data(q->pport, d);
}
-static inline void write_lpcontrol(struct qcam_device *q, int d)
+static void write_lpcontrol(struct qcam *q, int d)
{
if (d & 0x20) {
/* Set bidirectional mode to reverse (data in) */
@@ -124,126 +167,11 @@ static inline void write_lpcontrol(struct qcam_device *q, int d)
parport_write_control(q->pport, d);
}
-static int qc_waithand(struct qcam_device *q, int val);
-static int qc_command(struct qcam_device *q, int command);
-static int qc_readparam(struct qcam_device *q);
-static int qc_setscanmode(struct qcam_device *q);
-static int qc_readbytes(struct qcam_device *q, char buffer[]);
-
-static struct video_device qcam_template;
-
-static int qc_calibrate(struct qcam_device *q)
-{
- /*
- * Bugfix by Hanno Mueller hmueller@kabel.de, Mai 21 96
- * The white balance is an individiual value for each
- * quickcam.
- */
-
- int value;
- int count = 0;
-
- qc_command(q, 27); /* AutoAdjustOffset */
- qc_command(q, 0); /* Dummy Parameter, ignored by the camera */
-
- /* GetOffset (33) will read 255 until autocalibration */
- /* is finished. After that, a value of 1-254 will be */
- /* returned. */
-
- do {
- qc_command(q, 33);
- value = qc_readparam(q);
- mdelay(1);
- schedule();
- count++;
- } while (value == 0xff && count < 2048);
-
- q->whitebal = value;
- return value;
-}
-
-/* Initialize the QuickCam driver control structure. This is where
- * defaults are set for people who don't have a config file.*/
-
-static struct qcam_device *qcam_init(struct parport *port)
-{
- struct qcam_device *q;
-
- q = kmalloc(sizeof(struct qcam_device), GFP_KERNEL);
- if (q == NULL)
- return NULL;
-
- q->pport = port;
- q->pdev = parport_register_device(port, "bw-qcam", NULL, NULL,
- NULL, 0, NULL);
- if (q->pdev == NULL) {
- printk(KERN_ERR "bw-qcam: couldn't register for %s.\n",
- port->name);
- kfree(q);
- return NULL;
- }
-
- memcpy(&q->vdev, &qcam_template, sizeof(qcam_template));
-
- mutex_init(&q->lock);
-
- q->port_mode = (QC_ANY | QC_NOTSET);
- q->width = 320;
- q->height = 240;
- q->bpp = 4;
- q->transfer_scale = 2;
- q->contrast = 192;
- q->brightness = 180;
- q->whitebal = 105;
- q->top = 1;
- q->left = 14;
- q->mode = -1;
- q->status = QC_PARAM_CHANGE;
- return q;
-}
-
-
-/* qc_command is probably a bit of a misnomer -- it's used to send
- * bytes *to* the camera. Generally, these bytes are either commands
- * or arguments to commands, so the name fits, but it still bugs me a
- * bit. See the documentation for a list of commands. */
-
-static int qc_command(struct qcam_device *q, int command)
-{
- int n1, n2;
- int cmd;
-
- write_lpdata(q, command);
- write_lpcontrol(q, 6);
-
- n1 = qc_waithand(q, 1);
-
- write_lpcontrol(q, 0xe);
- n2 = qc_waithand(q, 0);
-
- cmd = (n1 & 0xf0) | ((n2 & 0xf0) >> 4);
- return cmd;
-}
-
-static int qc_readparam(struct qcam_device *q)
-{
- int n1, n2;
- int cmd;
-
- write_lpcontrol(q, 6);
- n1 = qc_waithand(q, 1);
-
- write_lpcontrol(q, 0xe);
- n2 = qc_waithand(q, 0);
-
- cmd = (n1 & 0xf0) | ((n2 & 0xf0) >> 4);
- return cmd;
-}
/* qc_waithand busy-waits for a handshake signal from the QuickCam.
* Almost all communication with the camera requires handshaking. */
-static int qc_waithand(struct qcam_device *q, int val)
+static int qc_waithand(struct qcam *q, int val)
{
int status;
int runs = 0;
@@ -286,7 +214,7 @@ static int qc_waithand(struct qcam_device *q, int val)
* (bit 3 of status register). It also returns the last value read,
* since this data is useful. */
-static unsigned int qc_waithand2(struct qcam_device *q, int val)
+static unsigned int qc_waithand2(struct qcam *q, int val)
{
unsigned int status;
int runs = 0;
@@ -309,6 +237,43 @@ static unsigned int qc_waithand2(struct qcam_device *q, int val)
return status;
}
+/* qc_command is probably a bit of a misnomer -- it's used to send
+ * bytes *to* the camera. Generally, these bytes are either commands
+ * or arguments to commands, so the name fits, but it still bugs me a
+ * bit. See the documentation for a list of commands. */
+
+static int qc_command(struct qcam *q, int command)
+{
+ int n1, n2;
+ int cmd;
+
+ write_lpdata(q, command);
+ write_lpcontrol(q, 6);
+
+ n1 = qc_waithand(q, 1);
+
+ write_lpcontrol(q, 0xe);
+ n2 = qc_waithand(q, 0);
+
+ cmd = (n1 & 0xf0) | ((n2 & 0xf0) >> 4);
+ return cmd;
+}
+
+static int qc_readparam(struct qcam *q)
+{
+ int n1, n2;
+ int cmd;
+
+ write_lpcontrol(q, 6);
+ n1 = qc_waithand(q, 1);
+
+ write_lpcontrol(q, 0xe);
+ n2 = qc_waithand(q, 0);
+
+ cmd = (n1 & 0xf0) | ((n2 & 0xf0) >> 4);
+ return cmd;
+}
+
/* Try to detect a QuickCam. It appears to flash the upper 4 bits of
the status register at 5-10 Hz. This is only used in the autoprobe
@@ -317,7 +282,7 @@ static unsigned int qc_waithand2(struct qcam_device *q, int val)
almost completely safe, while their method screws up my printer if
I plug it in before the camera. */
-static int qc_detect(struct qcam_device *q)
+static int qc_detect(struct qcam *q)
{
int reg, lastreg;
int count = 0;
@@ -358,41 +323,6 @@ static int qc_detect(struct qcam_device *q)
}
}
-
-/* Reset the QuickCam. This uses the same sequence the Windows
- * QuickPic program uses. Someone with a bi-directional port should
- * check that bi-directional mode is detected right, and then
- * implement bi-directional mode in qc_readbyte(). */
-
-static void qc_reset(struct qcam_device *q)
-{
- switch (q->port_mode & QC_FORCE_MASK) {
- case QC_FORCE_UNIDIR:
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
- break;
-
- case QC_FORCE_BIDIR:
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
- break;
-
- case QC_ANY:
- write_lpcontrol(q, 0x20);
- write_lpdata(q, 0x75);
-
- if (read_lpdata(q) != 0x75)
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
- else
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
- break;
- }
-
- write_lpcontrol(q, 0xb);
- udelay(250);
- write_lpcontrol(q, 0xe);
- qc_setscanmode(q); /* in case port_mode changed */
-}
-
-
/* Decide which scan mode to use. There's no real requirement that
* the scanmode match the resolution in q->height and q-> width -- the
* camera takes the picture at the resolution specified in the
@@ -402,7 +332,7 @@ static void qc_reset(struct qcam_device *q)
* returned. If the scan is smaller, then the rest of the image
* returned contains garbage. */
-static int qc_setscanmode(struct qcam_device *q)
+static int qc_setscanmode(struct qcam *q)
{
int old_mode = q->mode;
@@ -442,10 +372,45 @@ static int qc_setscanmode(struct qcam_device *q)
}
+/* Reset the QuickCam. This uses the same sequence the Windows
+ * QuickPic program uses. Someone with a bi-directional port should
+ * check that bi-directional mode is detected right, and then
+ * implement bi-directional mode in qc_readbyte(). */
+
+static void qc_reset(struct qcam *q)
+{
+ switch (q->port_mode & QC_FORCE_MASK) {
+ case QC_FORCE_UNIDIR:
+ q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
+ break;
+
+ case QC_FORCE_BIDIR:
+ q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
+ break;
+
+ case QC_ANY:
+ write_lpcontrol(q, 0x20);
+ write_lpdata(q, 0x75);
+
+ if (read_lpdata(q) != 0x75)
+ q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
+ else
+ q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
+ break;
+ }
+
+ write_lpcontrol(q, 0xb);
+ udelay(250);
+ write_lpcontrol(q, 0xe);
+ qc_setscanmode(q); /* in case port_mode changed */
+}
+
+
+
/* Reset the QuickCam and program for brightness, contrast,
* white-balance, and resolution. */
-static void qc_set(struct qcam_device *q)
+static void qc_set(struct qcam *q)
{
int val;
int val2;
@@ -499,7 +464,7 @@ static void qc_set(struct qcam_device *q)
the supplied buffer. It returns the number of bytes read,
or -1 on error. */
-static inline int qc_readbytes(struct qcam_device *q, char buffer[])
+static inline int qc_readbytes(struct qcam *q, char buffer[])
{
int ret = 1;
unsigned int hi, lo;
@@ -590,7 +555,7 @@ static inline int qc_readbytes(struct qcam_device *q, char buffer[])
* n=2^(bit depth)-1. Ask me for more details if you don't understand
* this. */
-static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long len)
+static long qc_capture(struct qcam *q, char __user *buf, unsigned long len)
{
int i, j, k, yield;
int bytes;
@@ -674,171 +639,206 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
* Video4linux interfacing
*/
-static long qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
+static int qcam_querycap(struct file *file, void *priv,
+ struct v4l2_capability *vcap)
{
- struct video_device *dev = video_devdata(file);
- struct qcam_device *qcam = (struct qcam_device *)dev;
-
- switch (cmd) {
- case VIDIOCGCAP:
- {
- struct video_capability *b = arg;
- strcpy(b->name, "Quickcam");
- b->type = VID_TYPE_CAPTURE|VID_TYPE_SCALES|VID_TYPE_MONOCHROME;
- b->channels = 1;
- b->audios = 0;
- b->maxwidth = 320;
- b->maxheight = 240;
- b->minwidth = 80;
- b->minheight = 60;
- return 0;
- }
- case VIDIOCGCHAN:
- {
- struct video_channel *v = arg;
- if (v->channel != 0)
- return -EINVAL;
- v->flags = 0;
- v->tuners = 0;
- /* Good question.. its composite or SVHS so.. */
- v->type = VIDEO_TYPE_CAMERA;
- strcpy(v->name, "Camera");
- return 0;
- }
- case VIDIOCSCHAN:
- {
- struct video_channel *v = arg;
- if (v->channel != 0)
- return -EINVAL;
- return 0;
- }
- case VIDIOCGTUNER:
- {
- struct video_tuner *v = arg;
- if (v->tuner)
- return -EINVAL;
- strcpy(v->name, "Format");
- v->rangelow = 0;
- v->rangehigh = 0;
- v->flags = 0;
- v->mode = VIDEO_MODE_AUTO;
- return 0;
- }
- case VIDIOCSTUNER:
- {
- struct video_tuner *v = arg;
- if (v->tuner)
- return -EINVAL;
- if (v->mode != VIDEO_MODE_AUTO)
- return -EINVAL;
- return 0;
- }
- case VIDIOCGPICT:
- {
- struct video_picture *p = arg;
- p->colour = 0x8000;
- p->hue = 0x8000;
- p->brightness = qcam->brightness << 8;
- p->contrast = qcam->contrast << 8;
- p->whiteness = qcam->whitebal << 8;
- p->depth = qcam->bpp;
- p->palette = VIDEO_PALETTE_GREY;
- return 0;
- }
- case VIDIOCSPICT:
- {
- struct video_picture *p = arg;
- if (p->palette != VIDEO_PALETTE_GREY)
- return -EINVAL;
- if (p->depth != 4 && p->depth != 6)
- return -EINVAL;
-
- /*
- * Now load the camera.
- */
-
- qcam->brightness = p->brightness >> 8;
- qcam->contrast = p->contrast >> 8;
- qcam->whitebal = p->whiteness >> 8;
- qcam->bpp = p->depth;
-
- mutex_lock(&qcam->lock);
- qc_setscanmode(qcam);
- mutex_unlock(&qcam->lock);
- qcam->status |= QC_PARAM_CHANGE;
+ struct qcam *qcam = video_drvdata(file);
- return 0;
- }
- case VIDIOCSWIN:
- {
- struct video_window *vw = arg;
- if (vw->flags)
- return -EINVAL;
- if (vw->clipcount)
- return -EINVAL;
- if (vw->height < 60 || vw->height > 240)
- return -EINVAL;
- if (vw->width < 80 || vw->width > 320)
- return -EINVAL;
-
- qcam->width = 320;
- qcam->height = 240;
- qcam->transfer_scale = 4;
-
- if (vw->width >= 160 && vw->height >= 120)
- qcam->transfer_scale = 2;
- if (vw->width >= 320 && vw->height >= 240) {
- qcam->width = 320;
- qcam->height = 240;
- qcam->transfer_scale = 1;
- }
- mutex_lock(&qcam->lock);
- qc_setscanmode(qcam);
- mutex_unlock(&qcam->lock);
+ strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
+ strlcpy(vcap->card, "B&W Quickcam", sizeof(vcap->card));
+ strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
+ vcap->version = KERNEL_VERSION(0, 0, 2);
+ vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ return 0;
+}
- /* We must update the camera before we grab. We could
- just have changed the grab size */
- qcam->status |= QC_PARAM_CHANGE;
+static int qcam_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
+{
+ if (vin->index > 0)
+ return -EINVAL;
+ strlcpy(vin->name, "Camera", sizeof(vin->name));
+ vin->type = V4L2_INPUT_TYPE_CAMERA;
+ vin->audioset = 0;
+ vin->tuner = 0;
+ vin->std = 0;
+ vin->status = 0;
+ return 0;
+}
- /* Ok we figured out what to use from our wide choice */
- return 0;
- }
- case VIDIOCGWIN:
- {
- struct video_window *vw = arg;
+static int qcam_g_input(struct file *file, void *fh, unsigned int *inp)
+{
+ *inp = 0;
+ return 0;
+}
- memset(vw, 0, sizeof(*vw));
- vw->width = qcam->width / qcam->transfer_scale;
- vw->height = qcam->height / qcam->transfer_scale;
- return 0;
- }
- case VIDIOCKEY:
- return 0;
- case VIDIOCCAPTURE:
- case VIDIOCGFBUF:
- case VIDIOCSFBUF:
- case VIDIOCGFREQ:
- case VIDIOCSFREQ:
- case VIDIOCGAUDIO:
- case VIDIOCSAUDIO:
- return -EINVAL;
+static int qcam_s_input(struct file *file, void *fh, unsigned int inp)
+{
+ return (inp > 0) ? -EINVAL : 0;
+}
+
+static int qcam_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ switch (qc->id) {
+ case V4L2_CID_BRIGHTNESS:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 180);
+ case V4L2_CID_CONTRAST:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 192);
+ case V4L2_CID_GAMMA:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 105);
+ }
+ return -EINVAL;
+}
+
+static int qcam_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct qcam *qcam = video_drvdata(file);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctrl->value = qcam->brightness;
+ break;
+ case V4L2_CID_CONTRAST:
+ ctrl->value = qcam->contrast;
+ break;
+ case V4L2_CID_GAMMA:
+ ctrl->value = qcam->whitebal;
+ break;
default:
- return -ENOIOCTLCMD;
+ ret = -EINVAL;
+ break;
}
+ return ret;
+}
+
+static int qcam_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct qcam *qcam = video_drvdata(file);
+ int ret = 0;
+
+ mutex_lock(&qcam->lock);
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ qcam->brightness = ctrl->value;
+ break;
+ case V4L2_CID_CONTRAST:
+ qcam->contrast = ctrl->value;
+ break;
+ case V4L2_CID_GAMMA:
+ qcam->whitebal = ctrl->value;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret == 0) {
+ qc_setscanmode(qcam);
+ qcam->status |= QC_PARAM_CHANGE;
+ }
+ mutex_unlock(&qcam->lock);
+ return ret;
+}
+
+static int qcam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct qcam *qcam = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ pix->width = qcam->width / qcam->transfer_scale;
+ pix->height = qcam->height / qcam->transfer_scale;
+ pix->pixelformat = (qcam->bpp == 4) ? V4L2_PIX_FMT_Y4 : V4L2_PIX_FMT_Y6;
+ pix->field = V4L2_FIELD_NONE;
+ pix->bytesperline = qcam->width;
+ pix->sizeimage = qcam->width * qcam->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ return 0;
+}
+
+static int qcam_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ if (pix->height <= 60 || pix->width <= 80) {
+ pix->height = 60;
+ pix->width = 80;
+ } else if (pix->height <= 120 || pix->width <= 160) {
+ pix->height = 120;
+ pix->width = 160;
+ } else {
+ pix->height = 240;
+ pix->width = 320;
+ }
+ if (pix->pixelformat != V4L2_PIX_FMT_Y4 &&
+ pix->pixelformat != V4L2_PIX_FMT_Y6)
+ pix->pixelformat = V4L2_PIX_FMT_Y4;
+ pix->field = V4L2_FIELD_NONE;
+ pix->bytesperline = pix->width;
+ pix->sizeimage = pix->width * pix->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ return 0;
+}
+
+static int qcam_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct qcam *qcam = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ int ret = qcam_try_fmt_vid_cap(file, fh, fmt);
+
+ if (ret)
+ return ret;
+ qcam->width = 320;
+ qcam->height = 240;
+ if (pix->height == 60)
+ qcam->transfer_scale = 4;
+ else if (pix->height == 120)
+ qcam->transfer_scale = 2;
+ else
+ qcam->transfer_scale = 1;
+ if (pix->pixelformat == V4L2_PIX_FMT_Y6)
+ qcam->bpp = 6;
+ else
+ qcam->bpp = 4;
+
+ mutex_lock(&qcam->lock);
+ qc_setscanmode(qcam);
+ /* We must update the camera before we grab. We could
+ just have changed the grab size */
+ qcam->status |= QC_PARAM_CHANGE;
+ mutex_unlock(&qcam->lock);
return 0;
}
-static long qcam_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+static int qcam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
{
- return video_usercopy(file, cmd, arg, qcam_do_ioctl);
+ static struct v4l2_fmtdesc formats[] = {
+ { 0, 0, 0,
+ "4-Bit Monochrome", V4L2_PIX_FMT_Y4,
+ { 0, 0, 0, 0 }
+ },
+ { 0, 0, 0,
+ "6-Bit Monochrome", V4L2_PIX_FMT_Y6,
+ { 0, 0, 0, 0 }
+ },
+ };
+ enum v4l2_buf_type type = fmt->type;
+
+ if (fmt->index > 1)
+ return -EINVAL;
+
+ *fmt = formats[fmt->index];
+ fmt->type = type;
+ return 0;
}
static ssize_t qcam_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct video_device *v = video_devdata(file);
- struct qcam_device *qcam = (struct qcam_device *)v;
+ struct qcam *qcam = video_drvdata(file);
int len;
parport_claim_or_block(qcam->pdev);
@@ -858,43 +858,112 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
return len;
}
-static int qcam_exclusive_open(struct file *file)
+static const struct v4l2_file_operations qcam_fops = {
+ .owner = THIS_MODULE,
+ .ioctl = video_ioctl2,
+ .read = qcam_read,
+};
+
+static const struct v4l2_ioctl_ops qcam_ioctl_ops = {
+ .vidioc_querycap = qcam_querycap,
+ .vidioc_g_input = qcam_g_input,
+ .vidioc_s_input = qcam_s_input,
+ .vidioc_enum_input = qcam_enum_input,
+ .vidioc_queryctrl = qcam_queryctrl,
+ .vidioc_g_ctrl = qcam_g_ctrl,
+ .vidioc_s_ctrl = qcam_s_ctrl,
+ .vidioc_enum_fmt_vid_cap = qcam_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = qcam_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = qcam_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = qcam_try_fmt_vid_cap,
+};
+
+/* Initialize the QuickCam driver control structure. This is where
+ * defaults are set for people who don't have a config file.*/
+
+static struct qcam *qcam_init(struct parport *port)
{
- struct video_device *dev = video_devdata(file);
- struct qcam_device *qcam = (struct qcam_device *)dev;
+ struct qcam *qcam;
+ struct v4l2_device *v4l2_dev;
+
+ qcam = kzalloc(sizeof(struct qcam), GFP_KERNEL);
+ if (qcam == NULL)
+ return NULL;
+
+ v4l2_dev = &qcam->v4l2_dev;
+ strlcpy(v4l2_dev->name, "bw-qcam", sizeof(v4l2_dev->name));
- return test_and_set_bit(0, &qcam->in_use) ? -EBUSY : 0;
+ if (v4l2_device_register(NULL, v4l2_dev) < 0) {
+ v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ return NULL;
+ }
+
+ qcam->pport = port;
+ qcam->pdev = parport_register_device(port, "bw-qcam", NULL, NULL,
+ NULL, 0, NULL);
+ if (qcam->pdev == NULL) {
+ v4l2_err(v4l2_dev, "couldn't register for %s.\n", port->name);
+ kfree(qcam);
+ return NULL;
+ }
+
+ strlcpy(qcam->vdev.name, "Connectix QuickCam", sizeof(qcam->vdev.name));
+ qcam->vdev.v4l2_dev = v4l2_dev;
+ qcam->vdev.fops = &qcam_fops;
+ qcam->vdev.ioctl_ops = &qcam_ioctl_ops;
+ qcam->vdev.release = video_device_release_empty;
+ video_set_drvdata(&qcam->vdev, qcam);
+
+ mutex_init(&qcam->lock);
+
+ qcam->port_mode = (QC_ANY | QC_NOTSET);
+ qcam->width = 320;
+ qcam->height = 240;
+ qcam->bpp = 4;
+ qcam->transfer_scale = 2;
+ qcam->contrast = 192;
+ qcam->brightness = 180;
+ qcam->whitebal = 105;
+ qcam->top = 1;
+ qcam->left = 14;
+ qcam->mode = -1;
+ qcam->status = QC_PARAM_CHANGE;
+ return qcam;
}
-static int qcam_exclusive_release(struct file *file)
+static int qc_calibrate(struct qcam *q)
{
- struct video_device *dev = video_devdata(file);
- struct qcam_device *qcam = (struct qcam_device *)dev;
+ /*
+ * Bugfix by Hanno Mueller hmueller@kabel.de, Mai 21 96
+ * The white balance is an individual value for each
+ * quickcam.
+ */
- clear_bit(0, &qcam->in_use);
- return 0;
-}
+ int value;
+ int count = 0;
-static const struct v4l2_file_operations qcam_fops = {
- .owner = THIS_MODULE,
- .open = qcam_exclusive_open,
- .release = qcam_exclusive_release,
- .ioctl = qcam_ioctl,
- .read = qcam_read,
-};
-static struct video_device qcam_template = {
- .name = "Connectix Quickcam",
- .fops = &qcam_fops,
- .release = video_device_release_empty,
-};
+ qc_command(q, 27); /* AutoAdjustOffset */
+ qc_command(q, 0); /* Dummy Parameter, ignored by the camera */
-#define MAX_CAMS 4
-static struct qcam_device *qcams[MAX_CAMS];
-static unsigned int num_cams;
+ /* GetOffset (33) will read 255 until autocalibration */
+ /* is finished. After that, a value of 1-254 will be */
+ /* returned. */
+
+ do {
+ qc_command(q, 33);
+ value = qc_readparam(q);
+ mdelay(1);
+ schedule();
+ count++;
+ } while (value == 0xff && count < 2048);
+
+ q->whitebal = value;
+ return value;
+}
static int init_bwqcam(struct parport *port)
{
- struct qcam_device *qcam;
+ struct qcam *qcam;
if (num_cams == MAX_CAMS) {
printk(KERN_ERR "Too many Quickcams (max %d)\n", MAX_CAMS);
@@ -919,7 +988,7 @@ static int init_bwqcam(struct parport *port)
parport_release(qcam->pdev);
- printk(KERN_INFO "Connectix Quickcam on %s\n", qcam->pport->name);
+ v4l2_info(&qcam->v4l2_dev, "Connectix Quickcam on %s\n", qcam->pport->name);
if (video_register_device(&qcam->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
parport_unregister_device(qcam->pdev);
@@ -932,7 +1001,7 @@ static int init_bwqcam(struct parport *port)
return 0;
}
-static void close_bwqcam(struct qcam_device *qcam)
+static void close_bwqcam(struct qcam *qcam)
{
video_unregister_device(&qcam->vdev);
parport_unregister_device(qcam->pdev);
@@ -983,7 +1052,7 @@ static void bwqcam_detach(struct parport *port)
{
int i;
for (i = 0; i < num_cams; i++) {
- struct qcam_device *qcam = qcams[i];
+ struct qcam *qcam = qcams[i];
if (qcam && qcam->pdev->port == port) {
qcams[i] = NULL;
close_bwqcam(qcam);
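The bw-qcam rewrite above is a typical V4L1-to-V4L2 conversion: the single qcam_do_ioctl() switch is split into per-ioctl callbacks collected in a v4l2_ioctl_ops table, dispatched through video_ioctl2(), with v4l2_ctrl_query_fill() describing the controls. A stripped-down skeleton of that shape, using hypothetical "mycam" names and only the brightness control:

#include <linux/module.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>

struct mycam {
        struct v4l2_device v4l2_dev;    /* parent device node */
        struct video_device vdev;       /* /dev/videoN node */
        int brightness;
};

static int mycam_queryctrl(struct file *file, void *priv,
                           struct v4l2_queryctrl *qc)
{
        if (qc->id == V4L2_CID_BRIGHTNESS)
                return v4l2_ctrl_query_fill(qc, 0, 255, 1, 180);
        return -EINVAL;
}

static int mycam_g_ctrl(struct file *file, void *priv,
                        struct v4l2_control *ctrl)
{
        struct mycam *cam = video_drvdata(file);

        if (ctrl->id != V4L2_CID_BRIGHTNESS)
                return -EINVAL;
        ctrl->value = cam->brightness;
        return 0;
}

static const struct v4l2_ioctl_ops mycam_ioctl_ops = {
        .vidioc_queryctrl = mycam_queryctrl,
        .vidioc_g_ctrl    = mycam_g_ctrl,
};

static const struct v4l2_file_operations mycam_fops = {
        .owner = THIS_MODULE,
        .ioctl = video_ioctl2,          /* generic V4L2 dispatcher */
};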
diff --git a/drivers/media/video/bw-qcam.h b/drivers/media/video/bw-qcam.h
deleted file mode 100644
index 8a60c5de0935..000000000000
--- a/drivers/media/video/bw-qcam.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Video4Linux bw-qcam driver
- *
- * Derived from code..
- */
-
-/******************************************************************
-
-Copyright (C) 1996 by Scott Laird
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL SCOTT LAIRD BE LIABLE FOR ANY CLAIM, DAMAGES OR
-OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
-******************************************************************/
-
-/* One from column A... */
-#define QC_NOTSET 0
-#define QC_UNIDIR 1
-#define QC_BIDIR 2
-#define QC_SERIAL 3
-
-/* ... and one from column B */
-#define QC_ANY 0x00
-#define QC_FORCE_UNIDIR 0x10
-#define QC_FORCE_BIDIR 0x20
-#define QC_FORCE_SERIAL 0x30
-/* in the port_mode member */
-
-#define QC_MODE_MASK 0x07
-#define QC_FORCE_MASK 0x70
-
-#define MAX_HEIGHT 243
-#define MAX_WIDTH 336
-
-/* Bit fields for status flags */
-#define QC_PARAM_CHANGE 0x01 /* Camera status change has occurred */
-
-struct qcam_device {
- struct video_device vdev;
- struct pardevice *pdev;
- struct parport *pport;
- struct mutex lock;
- int width, height;
- int bpp;
- int mode;
- int contrast, brightness, whitebal;
- int port_mode;
- int transfer_scale;
- int top, left;
- int status;
- unsigned int saved_bits;
- unsigned long in_use;
-};
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index 8f1dd88b32a6..6e4b19698c13 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -33,15 +33,17 @@
#include <linux/mm.h>
#include <linux/parport.h>
#include <linux/sched.h>
-#include <linux/videodev.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ioctl.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
-
+#include <linux/version.h>
+#include <linux/videodev2.h>
#include <asm/uaccess.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
-struct qcam_device {
+struct qcam {
+ struct v4l2_device v4l2_dev;
struct video_device vdev;
struct pardevice *pdev;
struct parport *pport;
@@ -51,7 +53,6 @@ struct qcam_device {
int contrast, brightness, whitebal;
int top, left;
unsigned int bidirectional;
- unsigned long in_use;
struct mutex lock;
};
@@ -68,33 +69,45 @@ struct qcam_device {
#define QC_DECIMATION_2 2
#define QC_DECIMATION_4 4
-#define BANNER "Colour QuickCam for Video4Linux v0.05"
+#define BANNER "Colour QuickCam for Video4Linux v0.06"
static int parport[MAX_CAMS] = { [1 ... MAX_CAMS-1] = -1 };
static int probe = 2;
static int force_rgb;
static int video_nr = -1;
-static inline void qcam_set_ack(struct qcam_device *qcam, unsigned int i)
+/* FIXME: parport=auto would never have worked, surely? --RR */
+MODULE_PARM_DESC(parport, "parport=<auto|n[,n]...> for port detection method\n"
+ "probe=<0|1|2> for camera detection method\n"
+ "force_rgb=<0|1> for RGB data format (default BGR)");
+module_param_array(parport, int, NULL, 0);
+module_param(probe, int, 0);
+module_param(force_rgb, bool, 0);
+module_param(video_nr, int, 0);
+
+static struct qcam *qcams[MAX_CAMS];
+static unsigned int num_cams;
+
+static inline void qcam_set_ack(struct qcam *qcam, unsigned int i)
{
/* note: the QC specs refer to the PCAck pin by voltage, not
software level. PC ports have builtin inverters. */
parport_frob_control(qcam->pport, 8, i ? 8 : 0);
}
-static inline unsigned int qcam_ready1(struct qcam_device *qcam)
+static inline unsigned int qcam_ready1(struct qcam *qcam)
{
return (parport_read_status(qcam->pport) & 0x8) ? 1 : 0;
}
-static inline unsigned int qcam_ready2(struct qcam_device *qcam)
+static inline unsigned int qcam_ready2(struct qcam *qcam)
{
return (parport_read_data(qcam->pport) & 0x1) ? 1 : 0;
}
-static unsigned int qcam_await_ready1(struct qcam_device *qcam,
- int value)
+static unsigned int qcam_await_ready1(struct qcam *qcam, int value)
{
+ struct v4l2_device *v4l2_dev = &qcam->v4l2_dev;
unsigned long oldjiffies = jiffies;
unsigned int i;
@@ -112,14 +125,15 @@ static unsigned int qcam_await_ready1(struct qcam_device *qcam,
}
/* Probably somebody pulled the plug out. Not much we can do. */
- printk(KERN_ERR "c-qcam: ready1 timeout (%d) %x %x\n", value,
+ v4l2_err(v4l2_dev, "ready1 timeout (%d) %x %x\n", value,
parport_read_status(qcam->pport),
parport_read_control(qcam->pport));
return 1;
}
-static unsigned int qcam_await_ready2(struct qcam_device *qcam, int value)
+static unsigned int qcam_await_ready2(struct qcam *qcam, int value)
{
+ struct v4l2_device *v4l2_dev = &qcam->v4l2_dev;
unsigned long oldjiffies = jiffies;
unsigned int i;
@@ -137,14 +151,14 @@ static unsigned int qcam_await_ready2(struct qcam_device *qcam, int value)
}
/* Probably somebody pulled the plug out. Not much we can do. */
- printk(KERN_ERR "c-qcam: ready2 timeout (%d) %x %x %x\n", value,
+ v4l2_err(v4l2_dev, "ready2 timeout (%d) %x %x %x\n", value,
parport_read_status(qcam->pport),
parport_read_control(qcam->pport),
parport_read_data(qcam->pport));
return 1;
}
-static int qcam_read_data(struct qcam_device *qcam)
+static int qcam_read_data(struct qcam *qcam)
{
unsigned int idata;
@@ -159,21 +173,22 @@ static int qcam_read_data(struct qcam_device *qcam)
return idata;
}
-static int qcam_write_data(struct qcam_device *qcam, unsigned int data)
+static int qcam_write_data(struct qcam *qcam, unsigned int data)
{
+ struct v4l2_device *v4l2_dev = &qcam->v4l2_dev;
unsigned int idata;
parport_write_data(qcam->pport, data);
idata = qcam_read_data(qcam);
if (data != idata) {
- printk(KERN_WARNING "cqcam: sent %x but received %x\n", data,
+ v4l2_warn(v4l2_dev, "sent %x but received %x\n", data,
idata);
return 1;
}
return 0;
}
-static inline int qcam_set(struct qcam_device *qcam, unsigned int cmd, unsigned int data)
+static inline int qcam_set(struct qcam *qcam, unsigned int cmd, unsigned int data)
{
if (qcam_write_data(qcam, cmd))
return -1;
@@ -182,14 +197,14 @@ static inline int qcam_set(struct qcam_device *qcam, unsigned int cmd, unsigned
return 0;
}
-static inline int qcam_get(struct qcam_device *qcam, unsigned int cmd)
+static inline int qcam_get(struct qcam *qcam, unsigned int cmd)
{
if (qcam_write_data(qcam, cmd))
return -1;
return qcam_read_data(qcam);
}
-static int qc_detect(struct qcam_device *qcam)
+static int qc_detect(struct qcam *qcam)
{
unsigned int stat, ostat, i, count = 0;
@@ -246,7 +261,7 @@ static int qc_detect(struct qcam_device *qcam)
return 0;
}
-static void qc_reset(struct qcam_device *qcam)
+static void qc_reset(struct qcam *qcam)
{
parport_write_control(qcam->pport, 0xc);
parport_write_control(qcam->pport, 0x8);
@@ -258,55 +273,55 @@ static void qc_reset(struct qcam_device *qcam)
/* Reset the QuickCam and program for brightness, contrast,
* white-balance, and resolution. */
-static void qc_setup(struct qcam_device *q)
+static void qc_setup(struct qcam *qcam)
{
- qc_reset(q);
+ qc_reset(qcam);
/* Set the brightness. */
- qcam_set(q, 11, q->brightness);
+ qcam_set(qcam, 11, qcam->brightness);
/* Set the height and width. These refer to the actual
CCD area *before* applying the selected decimation. */
- qcam_set(q, 17, q->ccd_height);
- qcam_set(q, 19, q->ccd_width / 2);
+ qcam_set(qcam, 17, qcam->ccd_height);
+ qcam_set(qcam, 19, qcam->ccd_width / 2);
/* Set top and left. */
- qcam_set(q, 0xd, q->top);
- qcam_set(q, 0xf, q->left);
+ qcam_set(qcam, 0xd, qcam->top);
+ qcam_set(qcam, 0xf, qcam->left);
/* Set contrast and white balance. */
- qcam_set(q, 0x19, q->contrast);
- qcam_set(q, 0x1f, q->whitebal);
+ qcam_set(qcam, 0x19, qcam->contrast);
+ qcam_set(qcam, 0x1f, qcam->whitebal);
/* Set the speed. */
- qcam_set(q, 45, 2);
+ qcam_set(qcam, 45, 2);
}
/* Read some bytes from the camera and put them in the buffer.
nbytes should be a multiple of 3, because bidirectional mode gives
us three bytes at a time. */
-static unsigned int qcam_read_bytes(struct qcam_device *q, unsigned char *buf, unsigned int nbytes)
+static unsigned int qcam_read_bytes(struct qcam *qcam, unsigned char *buf, unsigned int nbytes)
{
unsigned int bytes = 0;
- qcam_set_ack(q, 0);
- if (q->bidirectional) {
+ qcam_set_ack(qcam, 0);
+ if (qcam->bidirectional) {
/* It's a bidirectional port */
while (bytes < nbytes) {
unsigned int lo1, hi1, lo2, hi2;
unsigned char r, g, b;
- if (qcam_await_ready2(q, 1))
+ if (qcam_await_ready2(qcam, 1))
return bytes;
- lo1 = parport_read_data(q->pport) >> 1;
- hi1 = ((parport_read_status(q->pport) >> 3) & 0x1f) ^ 0x10;
- qcam_set_ack(q, 1);
- if (qcam_await_ready2(q, 0))
+ lo1 = parport_read_data(qcam->pport) >> 1;
+ hi1 = ((parport_read_status(qcam->pport) >> 3) & 0x1f) ^ 0x10;
+ qcam_set_ack(qcam, 1);
+ if (qcam_await_ready2(qcam, 0))
return bytes;
- lo2 = parport_read_data(q->pport) >> 1;
- hi2 = ((parport_read_status(q->pport) >> 3) & 0x1f) ^ 0x10;
- qcam_set_ack(q, 0);
+ lo2 = parport_read_data(qcam->pport) >> 1;
+ hi2 = ((parport_read_status(qcam->pport) >> 3) & 0x1f) ^ 0x10;
+ qcam_set_ack(qcam, 0);
r = lo1 | ((hi1 & 1) << 7);
g = ((hi1 & 0x1e) << 3) | ((hi2 & 0x1e) >> 1);
b = lo2 | ((hi2 & 1) << 7);
@@ -328,14 +343,14 @@ static unsigned int qcam_read_bytes(struct qcam_device *q, unsigned char *buf, u
while (bytes < nbytes) {
unsigned int hi, lo;
- if (qcam_await_ready1(q, 1))
+ if (qcam_await_ready1(qcam, 1))
return bytes;
- hi = (parport_read_status(q->pport) & 0xf0);
- qcam_set_ack(q, 1);
- if (qcam_await_ready1(q, 0))
+ hi = (parport_read_status(qcam->pport) & 0xf0);
+ qcam_set_ack(qcam, 1);
+ if (qcam_await_ready1(qcam, 0))
return bytes;
- lo = (parport_read_status(q->pport) & 0xf0);
- qcam_set_ack(q, 0);
+ lo = (parport_read_status(qcam->pport) & 0xf0);
+ qcam_set_ack(qcam, 0);
/* flip some bits */
rgb[(i = bytes++ % 3)] = (hi | (lo >> 4)) ^ 0x88;
if (i >= 2) {
@@ -361,10 +376,11 @@ get_fragment:
#define BUFSZ 150
-static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long len)
+static long qc_capture(struct qcam *qcam, char __user *buf, unsigned long len)
{
+ struct v4l2_device *v4l2_dev = &qcam->v4l2_dev;
unsigned lines, pixelsperline, bitsperxfer;
- unsigned int is_bi_dir = q->bidirectional;
+ unsigned int is_bi_dir = qcam->bidirectional;
size_t wantlen, outptr = 0;
char tmpbuf[BUFSZ];
@@ -373,10 +389,10 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
/* Wait for camera to become ready */
for (;;) {
- int i = qcam_get(q, 41);
+ int i = qcam_get(qcam, 41);
if (i == -1) {
- qc_setup(q);
+ qc_setup(qcam);
return -EIO;
}
if ((i & 0x80) == 0)
@@ -384,25 +400,25 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
schedule();
}
- if (qcam_set(q, 7, (q->mode | (is_bi_dir ? 1 : 0)) + 1))
+ if (qcam_set(qcam, 7, (qcam->mode | (is_bi_dir ? 1 : 0)) + 1))
return -EIO;
- lines = q->height;
- pixelsperline = q->width;
+ lines = qcam->height;
+ pixelsperline = qcam->width;
bitsperxfer = (is_bi_dir) ? 24 : 8;
if (is_bi_dir) {
/* Turn the port around */
- parport_data_reverse(q->pport);
+ parport_data_reverse(qcam->pport);
mdelay(3);
- qcam_set_ack(q, 0);
- if (qcam_await_ready1(q, 1)) {
- qc_setup(q);
+ qcam_set_ack(qcam, 0);
+ if (qcam_await_ready1(qcam, 1)) {
+ qc_setup(qcam);
return -EIO;
}
- qcam_set_ack(q, 1);
- if (qcam_await_ready1(q, 0)) {
- qc_setup(q);
+ qcam_set_ack(qcam, 1);
+ if (qcam_await_ready1(qcam, 0)) {
+ qc_setup(qcam);
return -EIO;
}
}
@@ -413,7 +429,7 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
size_t t, s;
s = (wantlen > BUFSZ) ? BUFSZ : wantlen;
- t = qcam_read_bytes(q, tmpbuf, s);
+ t = qcam_read_bytes(qcam, tmpbuf, s);
if (outptr < len) {
size_t sz = len - outptr;
@@ -432,10 +448,10 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
len = outptr;
if (wantlen) {
- printk(KERN_ERR "qcam: short read.\n");
+ v4l2_err(v4l2_dev, "short read.\n");
if (is_bi_dir)
- parport_data_forward(q->pport);
- qc_setup(q);
+ parport_data_forward(qcam->pport);
+ qc_setup(qcam);
return len;
}
@@ -443,49 +459,49 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
int l;
do {
- l = qcam_read_bytes(q, tmpbuf, 3);
+ l = qcam_read_bytes(qcam, tmpbuf, 3);
cond_resched();
} while (l && (tmpbuf[0] == 0x7e || tmpbuf[1] == 0x7e || tmpbuf[2] == 0x7e));
if (force_rgb) {
if (tmpbuf[0] != 0xe || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xf)
- printk(KERN_ERR "qcam: bad EOF\n");
+ v4l2_err(v4l2_dev, "bad EOF\n");
} else {
if (tmpbuf[0] != 0xf || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xe)
- printk(KERN_ERR "qcam: bad EOF\n");
+ v4l2_err(v4l2_dev, "bad EOF\n");
}
- qcam_set_ack(q, 0);
- if (qcam_await_ready1(q, 1)) {
- printk(KERN_ERR "qcam: no ack after EOF\n");
- parport_data_forward(q->pport);
- qc_setup(q);
+ qcam_set_ack(qcam, 0);
+ if (qcam_await_ready1(qcam, 1)) {
+ v4l2_err(v4l2_dev, "no ack after EOF\n");
+ parport_data_forward(qcam->pport);
+ qc_setup(qcam);
return len;
}
- parport_data_forward(q->pport);
+ parport_data_forward(qcam->pport);
mdelay(3);
- qcam_set_ack(q, 1);
- if (qcam_await_ready1(q, 0)) {
- printk(KERN_ERR "qcam: no ack to port turnaround\n");
- qc_setup(q);
+ qcam_set_ack(qcam, 1);
+ if (qcam_await_ready1(qcam, 0)) {
+ v4l2_err(v4l2_dev, "no ack to port turnaround\n");
+ qc_setup(qcam);
return len;
}
} else {
int l;
do {
- l = qcam_read_bytes(q, tmpbuf, 1);
+ l = qcam_read_bytes(qcam, tmpbuf, 1);
cond_resched();
} while (l && tmpbuf[0] == 0x7e);
- l = qcam_read_bytes(q, tmpbuf + 1, 2);
+ l = qcam_read_bytes(qcam, tmpbuf + 1, 2);
if (force_rgb) {
if (tmpbuf[0] != 0xe || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xf)
- printk(KERN_ERR "qcam: bad EOF\n");
+ v4l2_err(v4l2_dev, "bad EOF\n");
} else {
if (tmpbuf[0] != 0xf || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xe)
- printk(KERN_ERR "qcam: bad EOF\n");
+ v4l2_err(v4l2_dev, "bad EOF\n");
}
}
- qcam_write_data(q, 0);
+ qcam_write_data(qcam, 0);
return len;
}
@@ -493,184 +509,202 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
* Video4linux interfacing
*/
-static long qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
+static int qcam_querycap(struct file *file, void *priv,
+ struct v4l2_capability *vcap)
{
- struct video_device *dev = video_devdata(file);
- struct qcam_device *qcam = (struct qcam_device *)dev;
+ struct qcam *qcam = video_drvdata(file);
- switch (cmd) {
- case VIDIOCGCAP:
- {
- struct video_capability *b = arg;
+ strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
+ strlcpy(vcap->card, "Color Quickcam", sizeof(vcap->card));
+ strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
+ vcap->version = KERNEL_VERSION(0, 0, 3);
+ vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ return 0;
+}
- strcpy(b->name, "Quickcam");
- b->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES;
- b->channels = 1;
- b->audios = 0;
- b->maxwidth = 320;
- b->maxheight = 240;
- b->minwidth = 80;
- b->minheight = 60;
- return 0;
- }
- case VIDIOCGCHAN:
- {
- struct video_channel *v = arg;
-
- if (v->channel != 0)
- return -EINVAL;
- v->flags = 0;
- v->tuners = 0;
- /* Good question.. its composite or SVHS so.. */
- v->type = VIDEO_TYPE_CAMERA;
- strcpy(v->name, "Camera");
- return 0;
- }
- case VIDIOCSCHAN:
- {
- struct video_channel *v = arg;
+static int qcam_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
+{
+ if (vin->index > 0)
+ return -EINVAL;
+ strlcpy(vin->name, "Camera", sizeof(vin->name));
+ vin->type = V4L2_INPUT_TYPE_CAMERA;
+ vin->audioset = 0;
+ vin->tuner = 0;
+ vin->std = 0;
+ vin->status = 0;
+ return 0;
+}
- if (v->channel != 0)
- return -EINVAL;
- return 0;
- }
- case VIDIOCGTUNER:
- {
- struct video_tuner *v = arg;
-
- if (v->tuner)
- return -EINVAL;
- memset(v, 0, sizeof(*v));
- strcpy(v->name, "Format");
- v->mode = VIDEO_MODE_AUTO;
- return 0;
+static int qcam_g_input(struct file *file, void *fh, unsigned int *inp)
+{
+ *inp = 0;
+ return 0;
+}
+
+static int qcam_s_input(struct file *file, void *fh, unsigned int inp)
+{
+ return (inp > 0) ? -EINVAL : 0;
+}
+
+static int qcam_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ switch (qc->id) {
+ case V4L2_CID_BRIGHTNESS:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 240);
+ case V4L2_CID_CONTRAST:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 192);
+ case V4L2_CID_GAMMA:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
}
- case VIDIOCSTUNER:
- {
- struct video_tuner *v = arg;
-
- if (v->tuner)
- return -EINVAL;
- if (v->mode != VIDEO_MODE_AUTO)
- return -EINVAL;
- return 0;
+ return -EINVAL;
+}
+
+static int qcam_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct qcam *qcam = video_drvdata(file);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctrl->value = qcam->brightness;
+ break;
+ case V4L2_CID_CONTRAST:
+ ctrl->value = qcam->contrast;
+ break;
+ case V4L2_CID_GAMMA:
+ ctrl->value = qcam->whitebal;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
- case VIDIOCGPICT:
- {
- struct video_picture *p = arg;
-
- p->colour = 0x8000;
- p->hue = 0x8000;
- p->brightness = qcam->brightness << 8;
- p->contrast = qcam->contrast << 8;
- p->whiteness = qcam->whitebal << 8;
- p->depth = 24;
- p->palette = VIDEO_PALETTE_RGB24;
- return 0;
+ return ret;
+}
+
+static int qcam_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct qcam *qcam = video_drvdata(file);
+ int ret = 0;
+
+ mutex_lock(&qcam->lock);
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ qcam->brightness = ctrl->value;
+ break;
+ case V4L2_CID_CONTRAST:
+ qcam->contrast = ctrl->value;
+ break;
+ case V4L2_CID_GAMMA:
+ qcam->whitebal = ctrl->value;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
- case VIDIOCSPICT:
- {
- struct video_picture *p = arg;
-
- /*
- * Sanity check args
- */
- if (p->depth != 24 || p->palette != VIDEO_PALETTE_RGB24)
- return -EINVAL;
-
- /*
- * Now load the camera.
- */
- qcam->brightness = p->brightness >> 8;
- qcam->contrast = p->contrast >> 8;
- qcam->whitebal = p->whiteness >> 8;
-
- mutex_lock(&qcam->lock);
+ if (ret == 0) {
parport_claim_or_block(qcam->pdev);
qc_setup(qcam);
parport_release(qcam->pdev);
- mutex_unlock(&qcam->lock);
- return 0;
}
- case VIDIOCSWIN:
- {
- struct video_window *vw = arg;
-
- if (vw->flags)
- return -EINVAL;
- if (vw->clipcount)
- return -EINVAL;
- if (vw->height < 60 || vw->height > 240)
- return -EINVAL;
- if (vw->width < 80 || vw->width > 320)
- return -EINVAL;
-
- qcam->width = 80;
- qcam->height = 60;
- qcam->mode = QC_DECIMATION_4;
+ mutex_unlock(&qcam->lock);
+ return ret;
+}
- if (vw->width >= 160 && vw->height >= 120) {
- qcam->width = 160;
- qcam->height = 120;
- qcam->mode = QC_DECIMATION_2;
- }
- if (vw->width >= 320 && vw->height >= 240) {
- qcam->width = 320;
- qcam->height = 240;
- qcam->mode = QC_DECIMATION_1;
- }
- qcam->mode |= QC_MILLIONS;
-#if 0
- if (vw->width >= 640 && vw->height >= 480) {
- qcam->width = 640;
- qcam->height = 480;
- qcam->mode = QC_BILLIONS | QC_DECIMATION_1;
- }
-#endif
- /* Ok we figured out what to use from our
- wide choice */
- mutex_lock(&qcam->lock);
- parport_claim_or_block(qcam->pdev);
- qc_setup(qcam);
- parport_release(qcam->pdev);
- mutex_unlock(&qcam->lock);
- return 0;
- }
- case VIDIOCGWIN:
- {
- struct video_window *vw = arg;
- memset(vw, 0, sizeof(*vw));
- vw->width = qcam->width;
- vw->height = qcam->height;
- return 0;
+static int qcam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct qcam *qcam = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ pix->width = qcam->width;
+ pix->height = qcam->height;
+ pix->pixelformat = V4L2_PIX_FMT_RGB24;
+ pix->field = V4L2_FIELD_NONE;
+ pix->bytesperline = 3 * qcam->width;
+ pix->sizeimage = 3 * qcam->width * qcam->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ return 0;
+}
+
+static int qcam_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ if (pix->height < 60 || pix->width < 80) {
+ pix->height = 60;
+ pix->width = 80;
+ } else if (pix->height < 120 || pix->width < 160) {
+ pix->height = 120;
+ pix->width = 160;
+ } else {
+ pix->height = 240;
+ pix->width = 320;
}
- case VIDIOCKEY:
- return 0;
- case VIDIOCCAPTURE:
- case VIDIOCGFBUF:
- case VIDIOCSFBUF:
- case VIDIOCGFREQ:
- case VIDIOCSFREQ:
- case VIDIOCGAUDIO:
- case VIDIOCSAUDIO:
- return -EINVAL;
+ pix->pixelformat = V4L2_PIX_FMT_RGB24;
+ pix->field = V4L2_FIELD_NONE;
+ pix->bytesperline = 3 * pix->width;
+ pix->sizeimage = 3 * pix->width * pix->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ return 0;
+}
+
+static int qcam_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct qcam *qcam = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ int ret = qcam_try_fmt_vid_cap(file, fh, fmt);
+
+ if (ret)
+ return ret;
+ switch (pix->height) {
+ case 60:
+ qcam->mode = QC_DECIMATION_4;
+ break;
+ case 120:
+ qcam->mode = QC_DECIMATION_2;
+ break;
default:
- return -ENOIOCTLCMD;
+ qcam->mode = QC_DECIMATION_1;
+ break;
}
+
+ mutex_lock(&qcam->lock);
+ qcam->mode |= QC_MILLIONS;
+ qcam->height = pix->height;
+ qcam->width = pix->width;
+ parport_claim_or_block(qcam->pdev);
+ qc_setup(qcam);
+ parport_release(qcam->pdev);
+ mutex_unlock(&qcam->lock);
return 0;
}
-static long qcam_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+static int qcam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
{
- return video_usercopy(file, cmd, arg, qcam_do_ioctl);
+ static struct v4l2_fmtdesc formats[] = {
+ { 0, 0, 0,
+ "RGB 8:8:8", V4L2_PIX_FMT_RGB24,
+ { 0, 0, 0, 0 }
+ },
+ };
+ enum v4l2_buf_type type = fmt->type;
+
+ if (fmt->index > 0)
+ return -EINVAL;
+
+ *fmt = formats[fmt->index];
+ fmt->type = type;
+ return 0;
}
static ssize_t qcam_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct video_device *v = video_devdata(file);
- struct qcam_device *qcam = (struct qcam_device *)v;
+ struct qcam *qcam = video_drvdata(file);
int len;
mutex_lock(&qcam->lock);
@@ -682,81 +716,80 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
return len;
}
-static int qcam_exclusive_open(struct file *file)
-{
- struct video_device *dev = video_devdata(file);
- struct qcam_device *qcam = (struct qcam_device *)dev;
-
- return test_and_set_bit(0, &qcam->in_use) ? -EBUSY : 0;
-}
-
-static int qcam_exclusive_release(struct file *file)
-{
- struct video_device *dev = video_devdata(file);
- struct qcam_device *qcam = (struct qcam_device *)dev;
-
- clear_bit(0, &qcam->in_use);
- return 0;
-}
-
-/* video device template */
static const struct v4l2_file_operations qcam_fops = {
.owner = THIS_MODULE,
- .open = qcam_exclusive_open,
- .release = qcam_exclusive_release,
- .ioctl = qcam_ioctl,
+ .ioctl = video_ioctl2,
.read = qcam_read,
};
-static struct video_device qcam_template = {
- .name = "Colour QuickCam",
- .fops = &qcam_fops,
- .release = video_device_release_empty,
+static const struct v4l2_ioctl_ops qcam_ioctl_ops = {
+ .vidioc_querycap = qcam_querycap,
+ .vidioc_g_input = qcam_g_input,
+ .vidioc_s_input = qcam_s_input,
+ .vidioc_enum_input = qcam_enum_input,
+ .vidioc_queryctrl = qcam_queryctrl,
+ .vidioc_g_ctrl = qcam_g_ctrl,
+ .vidioc_s_ctrl = qcam_s_ctrl,
+ .vidioc_enum_fmt_vid_cap = qcam_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = qcam_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = qcam_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = qcam_try_fmt_vid_cap,
};
/* Initialize the QuickCam driver control structure. */
-static struct qcam_device *qcam_init(struct parport *port)
+static struct qcam *qcam_init(struct parport *port)
{
- struct qcam_device *q;
+ struct qcam *qcam;
+ struct v4l2_device *v4l2_dev;
- q = kmalloc(sizeof(struct qcam_device), GFP_KERNEL);
- if (q == NULL)
+ qcam = kzalloc(sizeof(*qcam), GFP_KERNEL);
+ if (qcam == NULL)
return NULL;
- q->pport = port;
- q->pdev = parport_register_device(port, "c-qcam", NULL, NULL,
+ v4l2_dev = &qcam->v4l2_dev;
+ strlcpy(v4l2_dev->name, "c-qcam", sizeof(v4l2_dev->name));
+
+ if (v4l2_device_register(NULL, v4l2_dev) < 0) {
+ v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ return NULL;
+ }
+
+ qcam->pport = port;
+ qcam->pdev = parport_register_device(port, "c-qcam", NULL, NULL,
NULL, 0, NULL);
- q->bidirectional = (q->pport->modes & PARPORT_MODE_TRISTATE) ? 1 : 0;
+ qcam->bidirectional = (qcam->pport->modes & PARPORT_MODE_TRISTATE) ? 1 : 0;
- if (q->pdev == NULL) {
- printk(KERN_ERR "c-qcam: couldn't register for %s.\n",
- port->name);
- kfree(q);
+ if (qcam->pdev == NULL) {
+ v4l2_err(v4l2_dev, "couldn't register for %s.\n", port->name);
+ kfree(qcam);
return NULL;
}
- memcpy(&q->vdev, &qcam_template, sizeof(qcam_template));
-
- mutex_init(&q->lock);
- q->width = q->ccd_width = 320;
- q->height = q->ccd_height = 240;
- q->mode = QC_MILLIONS | QC_DECIMATION_1;
- q->contrast = 192;
- q->brightness = 240;
- q->whitebal = 128;
- q->top = 1;
- q->left = 14;
- return q;
+ strlcpy(qcam->vdev.name, "Colour QuickCam", sizeof(qcam->vdev.name));
+ qcam->vdev.v4l2_dev = v4l2_dev;
+ qcam->vdev.fops = &qcam_fops;
+ qcam->vdev.ioctl_ops = &qcam_ioctl_ops;
+ qcam->vdev.release = video_device_release_empty;
+ video_set_drvdata(&qcam->vdev, qcam);
+
+ mutex_init(&qcam->lock);
+ qcam->width = qcam->ccd_width = 320;
+ qcam->height = qcam->ccd_height = 240;
+ qcam->mode = QC_MILLIONS | QC_DECIMATION_1;
+ qcam->contrast = 192;
+ qcam->brightness = 240;
+ qcam->whitebal = 128;
+ qcam->top = 1;
+ qcam->left = 14;
+ return qcam;
}
-static struct qcam_device *qcams[MAX_CAMS];
-static unsigned int num_cams;
-
static int init_cqcam(struct parport *port)
{
- struct qcam_device *qcam;
+ struct qcam *qcam;
+ struct v4l2_device *v4l2_dev;
if (parport[0] != -1) {
/* The user gave specific instructions */
@@ -777,6 +810,8 @@ static int init_cqcam(struct parport *port)
if (qcam == NULL)
return -ENODEV;
+ v4l2_dev = &qcam->v4l2_dev;
+
parport_claim_or_block(qcam->pdev);
qc_reset(qcam);
@@ -793,14 +828,14 @@ static int init_cqcam(struct parport *port)
parport_release(qcam->pdev);
if (video_register_device(&qcam->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
- printk(KERN_ERR "Unable to register Colour QuickCam on %s\n",
+ v4l2_err(v4l2_dev, "Unable to register Colour QuickCam on %s\n",
qcam->pport->name);
parport_unregister_device(qcam->pdev);
kfree(qcam);
return -ENODEV;
}
- printk(KERN_INFO "%s: Colour QuickCam found on %s\n",
+ v4l2_info(v4l2_dev, "%s: Colour QuickCam found on %s\n",
video_device_node_name(&qcam->vdev), qcam->pport->name);
qcams[num_cams++] = qcam;
@@ -808,7 +843,7 @@ static int init_cqcam(struct parport *port)
return 0;
}
-static void close_cqcam(struct qcam_device *qcam)
+static void close_cqcam(struct qcam *qcam)
{
video_unregister_device(&qcam->vdev);
parport_unregister_device(qcam->pdev);
@@ -833,7 +868,7 @@ static struct parport_driver cqcam_driver = {
static int __init cqcam_init(void)
{
- printk(BANNER "\n");
+ printk(KERN_INFO BANNER "\n");
return parport_register_driver(&cqcam_driver);
}
@@ -852,14 +887,5 @@ MODULE_AUTHOR("Philip Blundell <philb@gnu.org>");
MODULE_DESCRIPTION(BANNER);
MODULE_LICENSE("GPL");
-/* FIXME: parport=auto would never have worked, surely? --RR */
-MODULE_PARM_DESC(parport, "parport=<auto|n[,n]...> for port detection method\n"
- "probe=<0|1|2> for camera detection method\n"
- "force_rgb=<0|1> for RGB data format (default BGR)");
-module_param_array(parport, int, NULL, 0);
-module_param(probe, int, 0);
-module_param(force_rgb, bool, 0);
-module_param(video_nr, int, 0);
-
module_init(cqcam_init);
module_exit(cqcam_cleanup);
diff --git a/drivers/media/video/cx18/cx18-alsa-main.c b/drivers/media/video/cx18/cx18-alsa-main.c
index b5d7cbf4528a..d50d69da387b 100644
--- a/drivers/media/video/cx18/cx18-alsa-main.c
+++ b/drivers/media/video/cx18/cx18-alsa-main.c
@@ -1,7 +1,7 @@
/*
* ALSA interface to cx18 PCM capture streams
*
- * Copyright (C) 2009 Andy Walls <awalls@radix.net>
+ * Copyright (C) 2009 Andy Walls <awalls@md.metrocast.net>
* Copyright (C) 2009 Devin Heitmueller <dheitmueller@kernellabs.com>
*
* Portions of this work were sponsored by ONELAN Limited.
diff --git a/drivers/media/video/cx18/cx18-alsa-mixer.c b/drivers/media/video/cx18/cx18-alsa-mixer.c
index ef21114309fe..341bddc00b77 100644
--- a/drivers/media/video/cx18/cx18-alsa-mixer.c
+++ b/drivers/media/video/cx18/cx18-alsa-mixer.c
@@ -2,7 +2,7 @@
* ALSA mixer controls for the
* ALSA interface to cx18 PCM capture streams
*
- * Copyright (C) 2009 Andy Walls <awalls@radix.net>
+ * Copyright (C) 2009 Andy Walls <awalls@md.metrocast.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/cx18/cx18-alsa-mixer.h b/drivers/media/video/cx18/cx18-alsa-mixer.h
index 2d418db000fe..ec9238793f6f 100644
--- a/drivers/media/video/cx18/cx18-alsa-mixer.h
+++ b/drivers/media/video/cx18/cx18-alsa-mixer.h
@@ -2,7 +2,7 @@
* ALSA mixer controls for the
* ALSA interface to cx18 PCM capture streams
*
- * Copyright (C) 2009 Andy Walls <awalls@radix.net>
+ * Copyright (C) 2009 Andy Walls <awalls@md.metrocast.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/cx18/cx18-alsa-pcm.c b/drivers/media/video/cx18/cx18-alsa-pcm.c
index 2bd312daeb1e..8f55692db36d 100644
--- a/drivers/media/video/cx18/cx18-alsa-pcm.c
+++ b/drivers/media/video/cx18/cx18-alsa-pcm.c
@@ -2,7 +2,7 @@
* ALSA PCM device for the
* ALSA interface to cx18 PCM capture streams
*
- * Copyright (C) 2009 Andy Walls <awalls@radix.net>
+ * Copyright (C) 2009 Andy Walls <awalls@md.metrocast.net>
* Copyright (C) 2009 Devin Heitmueller <dheitmueller@kernellabs.com>
*
* Portions of this work were sponsored by ONELAN Limited.
diff --git a/drivers/media/video/cx18/cx18-alsa-pcm.h b/drivers/media/video/cx18/cx18-alsa-pcm.h