karo-tx-linux.git / drivers/infiniband/hw/qib/qib_file_ops.c
1 /*
2  * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
3  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
4  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34
35 #include <linux/pci.h>
36 #include <linux/poll.h>
37 #include <linux/cdev.h>
38 #include <linux/swap.h>
39 #include <linux/vmalloc.h>
40 #include <linux/highmem.h>
41 #include <linux/io.h>
42 #include <linux/jiffies.h>
43 #include <asm/pgtable.h>
44 #include <linux/delay.h>
45 #include <linux/export.h>
46 #include <linux/uio.h>
47
48 #include "qib.h"
49 #include "qib_common.h"
50 #include "qib_user_sdma.h"
51
52 #undef pr_fmt
53 #define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
54
55 static int qib_open(struct inode *, struct file *);
56 static int qib_close(struct inode *, struct file *);
57 static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
58 static ssize_t qib_write_iter(struct kiocb *, struct iov_iter *);
59 static unsigned int qib_poll(struct file *, struct poll_table_struct *);
60 static int qib_mmapf(struct file *, struct vm_area_struct *);
61
62 /*
63  * This is really, really weird shit - write() and writev() here
64  * have completely unrelated semantics.  Sucky userland ABI,
65  * film at 11.
66  */
67 static const struct file_operations qib_file_ops = {
68         .owner = THIS_MODULE,
69         .write = qib_write,
70         .write_iter = qib_write_iter,
71         .open = qib_open,
72         .release = qib_close,
73         .poll = qib_poll,
74         .mmap = qib_mmapf,
75         .llseek = noop_llseek,
76 };
77
78 /*
79  * Convert kernel virtual addresses to physical addresses so they don't
80  * potentially conflict with the chip addresses used as mmap offsets.
81  * It doesn't really matter what mmap offset we use as long as we can
82  * interpret it correctly.
83  */
84 static u64 cvt_kvaddr(void *p)
85 {
86         struct page *page;
87         u64 paddr = 0;
88
89         page = vmalloc_to_page(p);
90         if (page)
91                 paddr = page_to_pfn(page) << PAGE_SHIFT;
92
93         return paddr;
94 }
95
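/**
 * qib_get_base_info - fill in and copy out the user base info
 * @fp: the qib device file
 * @ubase: user buffer to copy the qib_base_info into
 * @ubase_size: size of the user buffer
 *
 * Describes this context's queues, PIO buffers and mmap offsets so the
 * user library can set up its mappings.
 */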
96 static int qib_get_base_info(struct file *fp, void __user *ubase,
97                              size_t ubase_size)
98 {
99         struct qib_ctxtdata *rcd = ctxt_fp(fp);
100         int ret = 0;
101         struct qib_base_info *kinfo = NULL;
102         struct qib_devdata *dd = rcd->dd;
103         struct qib_pportdata *ppd = rcd->ppd;
104         unsigned subctxt_cnt;
105         int shared, master;
106         size_t sz;
107
108         subctxt_cnt = rcd->subctxt_cnt;
109         if (!subctxt_cnt) {
110                 shared = 0;
111                 master = 0;
112                 subctxt_cnt = 1;
113         } else {
114                 shared = 1;
115                 master = !subctxt_fp(fp);
116         }
117
118         sz = sizeof(*kinfo);
119         /* If context sharing is not requested, allow the old size structure */
120         if (!shared)
121                 sz -= 7 * sizeof(u64);
122         if (ubase_size < sz) {
123                 ret = -EINVAL;
124                 goto bail;
125         }
126
127         kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
128         if (kinfo == NULL) {
129                 ret = -ENOMEM;
130                 goto bail;
131         }
132
133         ret = dd->f_get_base_info(rcd, kinfo);
134         if (ret < 0)
135                 goto bail;
136
137         kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
138         kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
139         kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
140         kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
141         /*
142          * have to mmap whole thing
143          */
144         kinfo->spi_rcv_egrbuftotlen =
145                 rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
146         kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
147         kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
148                 rcd->rcvegrbuf_chunks;
149         kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
150         if (master)
151                 kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
152         /*
153          * for this use, may be cfgctxts summed over all chips that
154          * are configured and present
155          */
156         kinfo->spi_nctxts = dd->cfgctxts;
157         /* unit (chip/board) our context is on */
158         kinfo->spi_unit = dd->unit;
159         kinfo->spi_port = ppd->port;
160         /* for now, only a single page */
161         kinfo->spi_tid_maxsize = PAGE_SIZE;
162
163         /*
164          * Doing this per context, and based on the skip value, etc.  This has
165          * to be the actual buffer size, since the protocol code treats it
166          * as an array.
167          *
168          * These have to be set to user addresses in the user code via mmap.
169          * These values are used on return to user code for the mmap target
170          * addresses only.  For 32 bit, same 44 bit address problem, so use
171          * the physical address, not virtual.  Before 2.6.11, using the
172          * page_address() macro worked, but in 2.6.11, even that returns the
173          * full 64 bit address (upper bits all 1's).  So far, using the
174          * physical addresses (or chip offsets, for chip mapping) works, but
175          * no doubt some future kernel release will change that, and we'll be
176          * on to yet another method of dealing with this.
177          * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
178          * since the chips with non-zero rhf_offset don't normally
179          * enable tail register updates to host memory, but for testing,
180          * both can be enabled and used.
181          */
182         kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
183         kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
184         kinfo->spi_rhf_offset = dd->rhf_offset;
185         kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
186         kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
187         /* setup per-unit (not port) status area for user programs */
188         kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
189                 (char *) ppd->statusp -
190                 (char *) dd->pioavailregs_dma;
191         kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
192         if (!shared) {
193                 kinfo->spi_piocnt = rcd->piocnt;
194                 kinfo->spi_piobufbase = (u64) rcd->piobufs;
195                 kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
196         } else if (master) {
197                 kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
198                                     (rcd->piocnt % subctxt_cnt);
199                 /* Master's PIO buffers are after all the slaves' */
200                 kinfo->spi_piobufbase = (u64) rcd->piobufs +
201                         dd->palign *
202                         (rcd->piocnt - kinfo->spi_piocnt);
203         } else {
204                 unsigned slave = subctxt_fp(fp) - 1;
205
206                 kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
207                 kinfo->spi_piobufbase = (u64) rcd->piobufs +
208                         dd->palign * kinfo->spi_piocnt * slave;
209         }
210
211         if (shared) {
212                 kinfo->spi_sendbuf_status =
213                         cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
214                 /* only spi_subctxt_* fields should be set in this block! */
215                 kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
216
217                 kinfo->spi_subctxt_rcvegrbuf =
218                         cvt_kvaddr(rcd->subctxt_rcvegrbuf);
219                 kinfo->spi_subctxt_rcvhdr_base =
220                         cvt_kvaddr(rcd->subctxt_rcvhdr_base);
221         }
222
223         /*
224          * All user buffers are 2KB buffers.  If we ever support
225          * giving 4KB buffers to user processes, this will need some
226          * work.  Can't use piobufbase directly, because it has
227          * both 2K and 4K buffer base values.
228          */
229         kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
230                 dd->palign;
231         kinfo->spi_pioalign = dd->palign;
232         kinfo->spi_qpair = QIB_KD_QP;
233         /*
234          * user mode PIO buffers are always 2KB, even when 4KB can
235          * be received, and sent via the kernel; this is ibmaxlen
236          * for 2K MTU.
237          */
238         kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
239         kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
240         kinfo->spi_ctxt = rcd->ctxt;
241         kinfo->spi_subctxt = subctxt_fp(fp);
242         kinfo->spi_sw_version = QIB_KERN_SWVERSION;
243         kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
244         kinfo->spi_hw_version = dd->revision;
245
246         if (master)
247                 kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;
248
249         sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
250         if (copy_to_user(ubase, kinfo, sz))
251                 ret = -EFAULT;
252 bail:
253         kfree(kinfo);
254         return ret;
255 }
256
257 /**
258  * qib_tid_update - update a context TID
259  * @rcd: the context
260  * @fp: the qib device file
261  * @ti: the TID information
262  *
263  * The new implementation as of Oct 2004 is that the driver assigns
264  * the tid and returns it to the caller.   To reduce search time, we
265  * keep a cursor for each context, walking the shadow tid array to find
266  * one that's not in use.
267  *
268  * For now, if we can't allocate the full list, we fail, although
269  * in the long run, we'll allocate as many as we can, and the
270  * caller will deal with that by trying the remaining pages later.
271  * That means that when we fail, we have to mark the tids as not in
272  * use again, in our shadow copy.
273  *
274  * It's up to the caller to free the tids when they are done.
275  * We'll unlock the pages as they free them.
276  *
277  * Also, right now we are locking one page at a time, but since
278  * the intended use of this routine is for a single group of
279  * virtually contiguous pages, that should change to improve
280  * performance.
281  */
282 static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
283                           const struct qib_tid_info *ti)
284 {
285         int ret = 0, ntids;
286         u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
287         u16 *tidlist;
288         struct qib_devdata *dd = rcd->dd;
289         u64 physaddr;
290         unsigned long vaddr;
291         u64 __iomem *tidbase;
292         unsigned long tidmap[8];
293         struct page **pagep = NULL;
294         unsigned subctxt = subctxt_fp(fp);
295
296         if (!dd->pageshadow) {
297                 ret = -ENOMEM;
298                 goto done;
299         }
300
301         cnt = ti->tidcnt;
302         if (!cnt) {
303                 ret = -EFAULT;
304                 goto done;
305         }
306         ctxttid = rcd->ctxt * dd->rcvtidcnt;
307         if (!rcd->subctxt_cnt) {
308                 tidcnt = dd->rcvtidcnt;
309                 tid = rcd->tidcursor;
310                 tidoff = 0;
311         } else if (!subctxt) {
312                 tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
313                          (dd->rcvtidcnt % rcd->subctxt_cnt);
314                 tidoff = dd->rcvtidcnt - tidcnt;
315                 ctxttid += tidoff;
316                 tid = tidcursor_fp(fp);
317         } else {
318                 tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
319                 tidoff = tidcnt * (subctxt - 1);
320                 ctxttid += tidoff;
321                 tid = tidcursor_fp(fp);
322         }
323         if (cnt > tidcnt) {
324                 /* make sure it all fits in tid_pg_list */
325                 qib_devinfo(dd->pcidev,
326                         "Process tried to allocate %u TIDs, only trying max (%u)\n",
327                         cnt, tidcnt);
328                 cnt = tidcnt;
329         }
330         pagep = (struct page **) rcd->tid_pg_list;
331         tidlist = (u16 *) &pagep[dd->rcvtidcnt];
332         pagep += tidoff;
333         tidlist += tidoff;
334
335         memset(tidmap, 0, sizeof(tidmap));
336         /* before decrement; chip actual # */
337         ntids = tidcnt;
338         tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
339                                    dd->rcvtidbase +
340                                    ctxttid * sizeof(*tidbase));
341
342         /* virtual address of first page in transfer */
343         vaddr = ti->tidvaddr;
344         if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
345                        cnt * PAGE_SIZE)) {
346                 ret = -EFAULT;
347                 goto done;
348         }
349         ret = qib_get_user_pages(vaddr, cnt, pagep);
350         if (ret) {
351                 /*
352                  * if (ret == -EBUSY)
353                  * We can't continue because the pagep array won't be
354                  * initialized. This should never happen,
355                  * unless perhaps the user has mpin'ed the pages
356                  * themselves.
357                  */
358                 qib_devinfo(
359                         dd->pcidev,
360                         "Failed to lock addr %p, %u pages: errno %d\n",
361                         (void *) vaddr, cnt, -ret);
362                 goto done;
363         }
364         for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
365                 for (; ntids--; tid++) {
366                         if (tid == tidcnt)
367                                 tid = 0;
368                         if (!dd->pageshadow[ctxttid + tid])
369                                 break;
370                 }
371                 if (ntids < 0) {
372                         /*
373                          * Oops, wrapped all the way through their TIDs,
374                          * and didn't have enough free; see comments at
375                          * start of routine
376                          */
377                         i--;    /* last tidlist[i] not filled in */
378                         ret = -ENOMEM;
379                         break;
380                 }
381                 tidlist[i] = tid + tidoff;
382                 /* we "know" system pages and TID pages are same size */
383                 dd->pageshadow[ctxttid + tid] = pagep[i];
384                 dd->physshadow[ctxttid + tid] =
385                         qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
386                                      PCI_DMA_FROMDEVICE);
387                 /*
388                  * don't need atomic or it's overhead
389                  */
390                 __set_bit(tid, tidmap);
391                 physaddr = dd->physshadow[ctxttid + tid];
392                 /* PERFORMANCE: below should almost certainly be cached */
393                 dd->f_put_tid(dd, &tidbase[tid],
394                                   RCVHQ_RCV_TYPE_EXPECTED, physaddr);
395                 /*
396                  * don't check this tid in qib_ctxtshadow, since we
397                  * just filled it in; start with the next one.
398                  */
399                 tid++;
400         }
401
402         if (ret) {
403                 u32 limit;
404 cleanup:
405                 /* jump here if copy out of updated info failed... */
406                 /* same code that's in qib_free_tid() */
407                 limit = sizeof(tidmap) * BITS_PER_BYTE;
408                 if (limit > tidcnt)
409                         /* just in case size changes in future */
410                         limit = tidcnt;
411                 tid = find_first_bit((const unsigned long *)tidmap, limit);
412                 for (; tid < limit; tid++) {
413                         if (!test_bit(tid, tidmap))
414                                 continue;
415                         if (dd->pageshadow[ctxttid + tid]) {
416                                 dma_addr_t phys;
417
418                                 phys = dd->physshadow[ctxttid + tid];
419                                 dd->physshadow[ctxttid + tid] = dd->tidinvalid;
420                                 /* PERFORMANCE: below should almost certainly
421                                  * be cached
422                                  */
423                                 dd->f_put_tid(dd, &tidbase[tid],
424                                               RCVHQ_RCV_TYPE_EXPECTED,
425                                               dd->tidinvalid);
426                                 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
427                                                PCI_DMA_FROMDEVICE);
428                                 dd->pageshadow[ctxttid + tid] = NULL;
429                         }
430                 }
431                 qib_release_user_pages(pagep, cnt);
432         } else {
433                 /*
434                  * Copy the updated array, with qib_tid's filled in, back
435                  * to user.  Since we did the copy in already, this "should
436                  * never fail".  If it does, we have to clean up...
437                  */
438                 if (copy_to_user((void __user *)
439                                  (unsigned long) ti->tidlist,
440                                  tidlist, cnt * sizeof(*tidlist))) {
441                         ret = -EFAULT;
442                         goto cleanup;
443                 }
444                 if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
445                                  tidmap, sizeof(tidmap))) {
446                         ret = -EFAULT;
447                         goto cleanup;
448                 }
449                 if (tid == tidcnt)
450                         tid = 0;
451                 if (!rcd->subctxt_cnt)
452                         rcd->tidcursor = tid;
453                 else
454                         tidcursor_fp(fp) = tid;
455         }
456
457 done:
458         return ret;
459 }
460
461 /**
462  * qib_tid_free - free a context TID
463  * @rcd: the context
464  * @subctxt: the subcontext
465  * @ti: the TID info
466  *
467  * Right now we are unlocking one page at a time, but since
468  * the intended use of this routine is for a single group of
469  * virtually contiguous pages, that should change to improve
470  * performance.  We check that the TID is in range for this context
471  * but otherwise don't check validity; if user has an error and
472  * frees the wrong tid, it's only their own data that can thereby
473  * be corrupted.  We do check that the TID was in use, for sanity.
474  * We always use our idea of the saved address, not the address that
475  * they pass in to us.
476  */
477 static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
478                         const struct qib_tid_info *ti)
479 {
480         int ret = 0;
481         u32 tid, ctxttid, cnt, limit, tidcnt;
482         struct qib_devdata *dd = rcd->dd;
483         u64 __iomem *tidbase;
484         unsigned long tidmap[8];
485
486         if (!dd->pageshadow) {
487                 ret = -ENOMEM;
488                 goto done;
489         }
490
491         if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
492                            sizeof(tidmap))) {
493                 ret = -EFAULT;
494                 goto done;
495         }
496
497         ctxttid = rcd->ctxt * dd->rcvtidcnt;
498         if (!rcd->subctxt_cnt)
499                 tidcnt = dd->rcvtidcnt;
500         else if (!subctxt) {
501                 tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
502                          (dd->rcvtidcnt % rcd->subctxt_cnt);
503                 ctxttid += dd->rcvtidcnt - tidcnt;
504         } else {
505                 tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
506                 ctxttid += tidcnt * (subctxt - 1);
507         }
508         tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
509                                    dd->rcvtidbase +
510                                    ctxttid * sizeof(*tidbase));
511
512         limit = sizeof(tidmap) * BITS_PER_BYTE;
513         if (limit > tidcnt)
514                 /* just in case size changes in future */
515                 limit = tidcnt;
516         tid = find_first_bit(tidmap, limit);
517         for (cnt = 0; tid < limit; tid++) {
518                 /*
519                  * small optimization; if we detect a run of 3 or so without
520                  * any set, use find_first_bit again.  That's mainly to
521                  * accelerate the case where we wrapped, so we have some at
522                  * the beginning, and some at the end, and a big gap
523                  * in the middle.
524                  */
525                 if (!test_bit(tid, tidmap))
526                         continue;
527                 cnt++;
528                 if (dd->pageshadow[ctxttid + tid]) {
529                         struct page *p;
530                         dma_addr_t phys;
531
532                         p = dd->pageshadow[ctxttid + tid];
533                         dd->pageshadow[ctxttid + tid] = NULL;
534                         phys = dd->physshadow[ctxttid + tid];
535                         dd->physshadow[ctxttid + tid] = dd->tidinvalid;
536                         /* PERFORMANCE: below should almost certainly be
537                          * cached
538                          */
539                         dd->f_put_tid(dd, &tidbase[tid],
540                                       RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
541                         pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
542                                        PCI_DMA_FROMDEVICE);
543                         qib_release_user_pages(&p, 1);
544                 }
545         }
546 done:
547         return ret;
548 }
549
550 /**
551  * qib_set_part_key - set a partition key
552  * @rcd: the context
553  * @key: the key
554  *
555  * We can have up to 4 active at a time (other than the default, which is
556  * always allowed).  This is somewhat tricky, since multiple contexts may set
557  * the same key, so we reference count them, and clean up at exit.  All 4
558  * partition keys are packed into a single qlogic_ib register.  It's an
559  * error for a process to set the same pkey multiple times.  We provide no
560  * mechanism to de-allocate a pkey at this time, we may eventually need to
561  * mechanism to de-allocate a pkey at this time; we may eventually need to
562  * a single pass through what's available.  This should be more than
563  * adequate for some time. I'll think about spinlocks or the like if and as
564  * it's necessary.
565  */
566 static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
567 {
568         struct qib_pportdata *ppd = rcd->ppd;
569         int i, any = 0, pidx = -1;
570         u16 lkey = key & 0x7FFF;
571         int ret;
572
573         if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
574                 /* nothing to do; this key always valid */
575                 ret = 0;
576                 goto bail;
577         }
578
579         if (!lkey) {
580                 ret = -EINVAL;
581                 goto bail;
582         }
583
584         /*
585          * Set the full membership bit, because it has to be
586          * set in the register or the packet, and it seems
587          * cleaner to set in the register than to force all
588          * callers to set it.
589          */
590         key |= 0x8000;
591
592         for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
593                 if (!rcd->pkeys[i] && pidx == -1)
594                         pidx = i;
595                 if (rcd->pkeys[i] == key) {
596                         ret = -EEXIST;
597                         goto bail;
598                 }
599         }
600         if (pidx == -1) {
601                 ret = -EBUSY;
602                 goto bail;
603         }
604         for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
605                 if (!ppd->pkeys[i]) {
606                         any++;
607                         continue;
608                 }
609                 if (ppd->pkeys[i] == key) {
610                         atomic_t *pkrefs = &ppd->pkeyrefs[i];
611
612                         if (atomic_inc_return(pkrefs) > 1) {
613                                 rcd->pkeys[pidx] = key;
614                                 ret = 0;
615                                 goto bail;
616                         } else {
617                                 /*
618                                  * lost race, decrement count, catch below
619                                  */
620                                 atomic_dec(pkrefs);
621                                 any++;
622                         }
623                 }
624                 if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
625                         /*
626                          * It makes no sense to have both the limited and
627                          * full membership PKEY set at the same time since
628                          * the unlimited one will disable the limited one.
629                          */
630                         ret = -EEXIST;
631                         goto bail;
632                 }
633         }
634         if (!any) {
635                 ret = -EBUSY;
636                 goto bail;
637         }
638         for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
639                 if (!ppd->pkeys[i] &&
640                     atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
641                         rcd->pkeys[pidx] = key;
642                         ppd->pkeys[i] = key;
643                         (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
644                         ret = 0;
645                         goto bail;
646                 }
647         }
648         ret = -EBUSY;
649
650 bail:
651         return ret;
652 }
653
654 /**
655  * qib_manage_rcvq - manage a context's receive queue
656  * @rcd: the context
657  * @subctxt: the subcontext
658  * @start_stop: action to carry out
659  *
660  * start_stop == 0 disables receive on the context, for use in queue
661  * overflow conditions.  start_stop == 1 re-enables, to be used to
662  * re-init the software copy of the head register.
663  */
664 static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
665                            int start_stop)
666 {
667         struct qib_devdata *dd = rcd->dd;
668         unsigned int rcvctrl_op;
669
670         if (subctxt)
671                 goto bail;
672         /* atomically clear receive enable ctxt. */
673         if (start_stop) {
674                 /*
675                  * On enable, force in-memory copy of the tail register to
676                  * 0, so that protocol code doesn't have to worry about
677                  * whether the chip has yet updated the in-memory
678                  * copy on return from the system call. The chip
679                  * always resets its tail register back to 0 on a
680                  * transition from disabled to enabled.
681                  */
682                 if (rcd->rcvhdrtail_kvaddr)
683                         qib_clear_rcvhdrtail(rcd);
684                 rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
685         } else
686                 rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
687         dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
688         /* always; new head should be equal to new tail; see above */
689 bail:
690         return 0;
691 }
692
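/*
 * Drop this context's references on the partition keys it had set; if
 * any port pkey entry drops to zero, clear it and update the chip's
 * pkey table.
 */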
693 static void qib_clean_part_key(struct qib_ctxtdata *rcd,
694                                struct qib_devdata *dd)
695 {
696         int i, j, pchanged = 0;
697         u64 oldpkey;
698         struct qib_pportdata *ppd = rcd->ppd;
699
700         /* for debugging only */
701         oldpkey = (u64) ppd->pkeys[0] |
702                 ((u64) ppd->pkeys[1] << 16) |
703                 ((u64) ppd->pkeys[2] << 32) |
704                 ((u64) ppd->pkeys[3] << 48);
705
706         for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
707                 if (!rcd->pkeys[i])
708                         continue;
709                 for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
710                         /* check for match independent of the global bit */
711                         if ((ppd->pkeys[j] & 0x7fff) !=
712                             (rcd->pkeys[i] & 0x7fff))
713                                 continue;
714                         if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
715                                 ppd->pkeys[j] = 0;
716                                 pchanged++;
717                         }
718                         break;
719                 }
720                 rcd->pkeys[i] = 0;
721         }
722         if (pchanged)
723                 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
724 }
725
726 /* common code for the mappings on dma_alloc_coherent mem */
727 static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
728                         unsigned len, void *kvaddr, u32 write_ok, char *what)
729 {
730         struct qib_devdata *dd = rcd->dd;
731         unsigned long pfn;
732         int ret;
733
734         if ((vma->vm_end - vma->vm_start) > len) {
735                 qib_devinfo(dd->pcidev,
736                          "FAIL on %s: len %lx > %x\n", what,
737                          vma->vm_end - vma->vm_start, len);
738                 ret = -EFAULT;
739                 goto bail;
740         }
741
742         /*
743          * Shared context user code requires the rcvhdrq mapped r/w;
744          * all other mappings must be read-only.
745          */
746         if (!write_ok) {
747                 if (vma->vm_flags & VM_WRITE) {
748                         qib_devinfo(dd->pcidev,
749                                  "%s must be mapped readonly\n", what);
750                         ret = -EPERM;
751                         goto bail;
752                 }
753
754                 /* don't allow them to later change with mprotect */
755                 vma->vm_flags &= ~VM_MAYWRITE;
756         }
757
758         pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
759         ret = remap_pfn_range(vma, vma->vm_start, pfn,
760                               len, vma->vm_page_prot);
761         if (ret)
762                 qib_devinfo(dd->pcidev,
763                         "%s ctxt%u mmap of %lx, %x bytes failed: %d\n",
764                         what, rcd->ctxt, pfn, len, ret);
765 bail:
766         return ret;
767 }
768
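/*
 * Map the chip's per-context user registers (head register updates,
 * etc.) into the process's address space.
 */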
769 static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
770                      u64 ureg)
771 {
772         unsigned long phys;
773         unsigned long sz;
774         int ret;
775
776         /*
777          * This is real hardware, so use io_remap.  This is the mechanism
778          * for the user process to update the head registers for their ctxt
779          * in the chip.
780          */
781         sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
782         if ((vma->vm_end - vma->vm_start) > sz) {
783                 qib_devinfo(dd->pcidev,
784                         "FAIL mmap userreg: reqlen %lx > PAGE\n",
785                         vma->vm_end - vma->vm_start);
786                 ret = -EFAULT;
787         } else {
788                 phys = dd->physaddr + ureg;
789                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
790
791                 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
792                 ret = io_remap_pfn_range(vma, vma->vm_start,
793                                          phys >> PAGE_SHIFT,
794                                          vma->vm_end - vma->vm_start,
795                                          vma->vm_page_prot);
796         }
797         return ret;
798 }
799
800 static int mmap_piobufs(struct vm_area_struct *vma,
801                         struct qib_devdata *dd,
802                         struct qib_ctxtdata *rcd,
803                         unsigned piobufs, unsigned piocnt)
804 {
805         unsigned long phys;
806         int ret;
807
808         /*
809          * When we map the PIO buffers in the chip, we want to map them as
810          * writeonly, no read possible; unfortunately, x86 doesn't allow
811          * for this in hardware, but we still prevent users from asking
812          * for it.
813          */
814         if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
815                 qib_devinfo(dd->pcidev,
816                         "FAIL mmap piobufs: reqlen %lx > PAGE\n",
817                          vma->vm_end - vma->vm_start);
818                 ret = -EINVAL;
819                 goto bail;
820         }
821
822         phys = dd->physaddr + piobufs;
823
824 #if defined(__powerpc__)
825         /* There isn't a generic way to specify writethrough mappings */
826         pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
827         pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
828         pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
829 #endif
830
831         /*
832          * don't allow them to later change to readable with mprotect (for when
833          * not initially mapped readable, as is normally the case)
834          */
835         vma->vm_flags &= ~VM_MAYREAD;
836         vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
837
838         /* We used PAT if wc_cookie == 0 */
839         if (!dd->wc_cookie)
840                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
841
842         ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
843                                  vma->vm_end - vma->vm_start,
844                                  vma->vm_page_prot);
845 bail:
846         return ret;
847 }
848
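/*
 * Map the receive eager buffers, read-only, chunk by chunk into the
 * process's address space.
 */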
849 static int mmap_rcvegrbufs(struct vm_area_struct *vma,
850                            struct qib_ctxtdata *rcd)
851 {
852         struct qib_devdata *dd = rcd->dd;
853         unsigned long start, size;
854         size_t total_size, i;
855         unsigned long pfn;
856         int ret;
857
858         size = rcd->rcvegrbuf_size;
859         total_size = rcd->rcvegrbuf_chunks * size;
860         if ((vma->vm_end - vma->vm_start) > total_size) {
861                 qib_devinfo(dd->pcidev,
862                         "FAIL on egr bufs: reqlen %lx > actual %lx\n",
863                          vma->vm_end - vma->vm_start,
864                          (unsigned long) total_size);
865                 ret = -EINVAL;
866                 goto bail;
867         }
868
869         if (vma->vm_flags & VM_WRITE) {
870                 qib_devinfo(dd->pcidev,
871                         "Can't map eager buffers as writable (flags=%lx)\n",
872                         vma->vm_flags);
873                 ret = -EPERM;
874                 goto bail;
875         }
876         /* don't allow them to later change to writeable with mprotect */
877         vma->vm_flags &= ~VM_MAYWRITE;
878
879         start = vma->vm_start;
880
881         for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
882                 pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
883                 ret = remap_pfn_range(vma, start, pfn, size,
884                                       vma->vm_page_prot);
885                 if (ret < 0)
886                         goto bail;
887         }
888         ret = 0;
889
890 bail:
891         return ret;
892 }
893
894 /*
895  * qib_file_vma_fault - handle a VMA page fault.
896  */
897 static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
898 {
899         struct page *page;
900
901         page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
902         if (!page)
903                 return VM_FAULT_SIGBUS;
904
905         get_page(page);
906         vmf->page = page;
907
908         return 0;
909 }
910
911 static const struct vm_operations_struct qib_file_vm_ops = {
912         .fault = qib_file_vma_fault,
913 };
914
915 static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
916                        struct qib_ctxtdata *rcd, unsigned subctxt)
917 {
918         struct qib_devdata *dd = rcd->dd;
919         unsigned subctxt_cnt;
920         unsigned long len;
921         void *addr;
922         size_t size;
923         int ret = 0;
924
925         subctxt_cnt = rcd->subctxt_cnt;
926         size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
927
928         /*
929          * Each process has all the subctxt uregbase, rcvhdrq, and
930          * rcvegrbufs mmapped - as an array for all the processes,
931          * and also separately for this process.
932          */
933         if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
934                 addr = rcd->subctxt_uregbase;
935                 size = PAGE_SIZE * subctxt_cnt;
936         } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
937                 addr = rcd->subctxt_rcvhdr_base;
938                 size = rcd->rcvhdrq_size * subctxt_cnt;
939         } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
940                 addr = rcd->subctxt_rcvegrbuf;
941                 size *= subctxt_cnt;
942         } else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
943                                         PAGE_SIZE * subctxt)) {
944                 addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
945                 size = PAGE_SIZE;
946         } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
947                                         rcd->rcvhdrq_size * subctxt)) {
948                 addr = rcd->subctxt_rcvhdr_base +
949                         rcd->rcvhdrq_size * subctxt;
950                 size = rcd->rcvhdrq_size;
951         } else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
952                 addr = rcd->user_event_mask;
953                 size = PAGE_SIZE;
954         } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
955                                         size * subctxt)) {
956                 addr = rcd->subctxt_rcvegrbuf + size * subctxt;
957                 /* rcvegrbufs are read-only on the slave */
958                 if (vma->vm_flags & VM_WRITE) {
959                         qib_devinfo(dd->pcidev,
960                                  "Can't map eager buffers as writable (flags=%lx)\n",
961                                  vma->vm_flags);
962                         ret = -EPERM;
963                         goto bail;
964                 }
965                 /*
966                  * Don't allow permission to later change to writeable
967                  * with mprotect.
968                  */
969                 vma->vm_flags &= ~VM_MAYWRITE;
970         } else
971                 goto bail;
972         len = vma->vm_end - vma->vm_start;
973         if (len > size) {
974                 ret = -EINVAL;
975                 goto bail;
976         }
977
978         vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
979         vma->vm_ops = &qib_file_vm_ops;
980         vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
981         ret = 1;
982
983 bail:
984         return ret;
985 }
986
987 /**
988  * qib_mmapf - mmap various structures into user space
989  * @fp: the file pointer
990  * @vma: the VM area
991  *
992  * We use this to have a shared buffer between the kernel and the user code
993  * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
994  * buffers in the chip.  We have the open and close entries so we can bump
995  * the ref count and keep the driver from being unloaded while still mapped.
996  */
997 static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
998 {
999         struct qib_ctxtdata *rcd;
1000         struct qib_devdata *dd;
1001         u64 pgaddr, ureg;
1002         unsigned piobufs, piocnt;
1003         int ret, match = 1;
1004
1005         rcd = ctxt_fp(fp);
1006         if (!rcd || !(vma->vm_flags & VM_SHARED)) {
1007                 ret = -EINVAL;
1008                 goto bail;
1009         }
1010         dd = rcd->dd;
1011
1012         /*
1013          * This is the qib_do_user_init() code, mapping the shared buffers
1014          * and per-context user registers into the user process. The address
1015          * referred to by vm_pgoff is the file offset passed via mmap().
1016          * For shared contexts, this is the kernel vmalloc() address of the
1017          * pages to share with the master.
1018          * For non-shared or master ctxts, this is a physical address.
1019          * We only do one mmap for each space mapped.
1020          */
1021         pgaddr = vma->vm_pgoff << PAGE_SHIFT;
1022
1023         /*
1024          * Check for 0 in case one of the allocations failed, but user
1025          * called mmap anyway.
1026          */
1027         if (!pgaddr)  {
1028                 ret = -EINVAL;
1029                 goto bail;
1030         }
1031
1032         /*
1033          * Physical addresses must fit in 40 bits for our hardware.
1034          * Check for kernel virtual addresses first, anything else must
1035          * match a HW or memory address.
1036          */
1037         ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
1038         if (ret) {
1039                 if (ret > 0)
1040                         ret = 0;
1041                 goto bail;
1042         }
1043
1044         ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
1045         if (!rcd->subctxt_cnt) {
1046                 /* ctxt is not shared */
1047                 piocnt = rcd->piocnt;
1048                 piobufs = rcd->piobufs;
1049         } else if (!subctxt_fp(fp)) {
1050                 /* caller is the master */
1051                 piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
1052                          (rcd->piocnt % rcd->subctxt_cnt);
1053                 piobufs = rcd->piobufs +
1054                         dd->palign * (rcd->piocnt - piocnt);
1055         } else {
1056                 unsigned slave = subctxt_fp(fp) - 1;
1057
1058                 /* caller is a slave */
1059                 piocnt = rcd->piocnt / rcd->subctxt_cnt;
1060                 piobufs = rcd->piobufs + dd->palign * piocnt * slave;
1061         }
1062
1063         if (pgaddr == ureg)
1064                 ret = mmap_ureg(vma, dd, ureg);
1065         else if (pgaddr == piobufs)
1066                 ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
1067         else if (pgaddr == dd->pioavailregs_phys)
1068                 /* in-memory copy of pioavail registers */
1069                 ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
1070                                    (void *) dd->pioavailregs_dma, 0,
1071                                    "pioavail registers");
1072         else if (pgaddr == rcd->rcvegr_phys)
1073                 ret = mmap_rcvegrbufs(vma, rcd);
1074         else if (pgaddr == (u64) rcd->rcvhdrq_phys)
1075                 /*
1076                  * The rcvhdrq itself; multiple pages, contiguous
1077                  * from an i/o perspective.  Shared contexts need
1078                  * to map r/w, so we allow writing.
1079                  */
1080                 ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
1081                                    rcd->rcvhdrq, 1, "rcvhdrq");
1082         else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
1083                 /* in-memory copy of rcvhdrq tail register */
1084                 ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
1085                                    rcd->rcvhdrtail_kvaddr, 0,
1086                                    "rcvhdrq tail");
1087         else
1088                 match = 0;
1089         if (!match)
1090                 ret = -EINVAL;
1091
1092         vma->vm_private_data = NULL;
1093
1094         if (ret < 0)
1095                 qib_devinfo(dd->pcidev,
1096                          "mmap Failure %d: off %llx len %lx\n",
1097                          -ret, (unsigned long long)pgaddr,
1098                          vma->vm_end - vma->vm_start);
1099 bail:
1100         return ret;
1101 }
1102
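/*
 * Poll for an urgent packet: readable when the urgent count has moved
 * since the last poll; otherwise note that we are waiting for urgent.
 */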
1103 static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
1104                                     struct file *fp,
1105                                     struct poll_table_struct *pt)
1106 {
1107         struct qib_devdata *dd = rcd->dd;
1108         unsigned pollflag;
1109
1110         poll_wait(fp, &rcd->wait, pt);
1111
1112         spin_lock_irq(&dd->uctxt_lock);
1113         if (rcd->urgent != rcd->urgent_poll) {
1114                 pollflag = POLLIN | POLLRDNORM;
1115                 rcd->urgent_poll = rcd->urgent;
1116         } else {
1117                 pollflag = 0;
1118                 set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
1119         }
1120         spin_unlock_irq(&dd->uctxt_lock);
1121
1122         return pollflag;
1123 }
1124
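/*
 * Poll for any received packet: if the header queue is empty, enable
 * the receive-available interrupt and wait; otherwise readable now.
 */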
1125 static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
1126                                   struct file *fp,
1127                                   struct poll_table_struct *pt)
1128 {
1129         struct qib_devdata *dd = rcd->dd;
1130         unsigned pollflag;
1131
1132         poll_wait(fp, &rcd->wait, pt);
1133
1134         spin_lock_irq(&dd->uctxt_lock);
1135         if (dd->f_hdrqempty(rcd)) {
1136                 set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
1137                 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
1138                 pollflag = 0;
1139         } else
1140                 pollflag = POLLIN | POLLRDNORM;
1141         spin_unlock_irq(&dd->uctxt_lock);
1142
1143         return pollflag;
1144 }
1145
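/*
 * Top-level poll entry: dispatch to the urgent or any-receive handler
 * based on the poll type selected for this context.
 */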
1146 static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
1147 {
1148         struct qib_ctxtdata *rcd;
1149         unsigned pollflag;
1150
1151         rcd = ctxt_fp(fp);
1152         if (!rcd)
1153                 pollflag = POLLERR;
1154         else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
1155                 pollflag = qib_poll_urgent(rcd, fp, pt);
1156         else  if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
1157                 pollflag = qib_poll_next(rcd, fp, pt);
1158         else /* invalid */
1159                 pollflag = POLLERR;
1160
1161         return pollflag;
1162 }
1163
1164 static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
1165 {
1166         struct qib_filedata *fd = fp->private_data;
1167         const unsigned int weight = cpumask_weight(&current->cpus_allowed);
1168         const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
1169         int local_cpu;
1170
1171         /*
1172          * If the process has NOT already set its affinity, select and
1173          * reserve a processor for it on the local NUMA node.
1174          */
1175         if ((weight >= qib_cpulist_count) &&
1176                 (cpumask_weight(local_mask) <= qib_cpulist_count)) {
1177                 for_each_cpu(local_cpu, local_mask)
1178                         if (!test_and_set_bit(local_cpu, qib_cpulist)) {
1179                                 fd->rec_cpu_num = local_cpu;
1180                                 return;
1181                         }
1182         }
1183
1184         /*
1185          * If the process has NOT already set its affinity, select and
1186          * reserve a processor for it, as a rendezvous for all
1187          * users of the driver.  If they don't actually later
1188          * set affinity to this cpu, or set it to some other cpu,
1189          * it just means that sooner or later we don't recommend
1190          * a cpu, and let the scheduler do its best.
1191          */
1192         if (weight >= qib_cpulist_count) {
1193                 int cpu;
1194
1195                 cpu = find_first_zero_bit(qib_cpulist,
1196                                           qib_cpulist_count);
1197                 if (cpu == qib_cpulist_count)
1198                         qib_dev_err(dd,
1199                         "no cpus avail for affinity PID %u\n",
1200                         current->pid);
1201                 else {
1202                         __set_bit(cpu, qib_cpulist);
1203                         fd->rec_cpu_num = cpu;
1204                 }
1205         }
1206 }
1207
1208 /*
1209  * Check that userland and driver are compatible for subcontexts.
1210  */
1211 static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
1212 {
1213         /* this code is written long-hand for clarity */
1214         if (QIB_USER_SWMAJOR != user_swmajor) {
1215                 /* no promise of compatibility if major mismatch */
1216                 return 0;
1217         }
1218         if (QIB_USER_SWMAJOR == 1) {
1219                 switch (QIB_USER_SWMINOR) {
1220                 case 0:
1221                 case 1:
1222                 case 2:
1223                         /* no subctxt implementation so cannot be compatible */
1224                         return 0;
1225                 case 3:
1226                         /* 3 is only compatible with itself */
1227                         return user_swminor == 3;
1228                 default:
1229                         /* >= 4 are compatible (or are expected to be) */
1230                         return user_swminor <= QIB_USER_SWMINOR;
1231                 }
1232         }
1233         /* make no promises yet for future major versions */
1234         return 0;
1235 }
1236
1237 static int init_subctxts(struct qib_devdata *dd,
1238                          struct qib_ctxtdata *rcd,
1239                          const struct qib_user_info *uinfo)
1240 {
1241         int ret = 0;
1242         unsigned num_subctxts;
1243         size_t size;
1244
1245         /*
1246          * If the user is requesting zero subctxts,
1247          * skip the subctxt allocation.
1248          */
1249         if (uinfo->spu_subctxt_cnt <= 0)
1250                 goto bail;
1251         num_subctxts = uinfo->spu_subctxt_cnt;
1252
1253         /* Check for subctxt compatibility */
1254         if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
1255                 uinfo->spu_userversion & 0xffff)) {
1256                 qib_devinfo(dd->pcidev,
1257                          "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
1258                          (int) (uinfo->spu_userversion >> 16),
1259                          (int) (uinfo->spu_userversion & 0xffff),
1260                          QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
1261                 goto bail;
1262         }
1263         if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
1264                 ret = -EINVAL;
1265                 goto bail;
1266         }
1267
1268         rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
1269         if (!rcd->subctxt_uregbase) {
1270                 ret = -ENOMEM;
1271                 goto bail;
1272         }
1273         /* Note: rcd->rcvhdrq_size isn't initialized yet. */
1274         size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
1275                      sizeof(u32), PAGE_SIZE) * num_subctxts;
1276         rcd->subctxt_rcvhdr_base = vmalloc_user(size);
1277         if (!rcd->subctxt_rcvhdr_base) {
1278                 ret = -ENOMEM;
1279                 goto bail_ureg;
1280         }
1281
1282         rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
1283                                               rcd->rcvegrbuf_size *
1284                                               num_subctxts);
1285         if (!rcd->subctxt_rcvegrbuf) {
1286                 ret = -ENOMEM;
1287                 goto bail_rhdr;
1288         }
1289
1290         rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
1291         rcd->subctxt_id = uinfo->spu_subctxt_id;
1292         rcd->active_slaves = 1;
1293         rcd->redirect_seq_cnt = 1;
1294         set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
1295         goto bail;
1296
1297 bail_rhdr:
1298         vfree(rcd->subctxt_rcvhdr_base);
1299 bail_ureg:
1300         vfree(rcd->subctxt_uregbase);
1301         rcd->subctxt_uregbase = NULL;
1302 bail:
1303         return ret;
1304 }
1305
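/*
 * Allocate and initialize a context for this open: assign CPU/NUMA
 * affinity, allocate the TID page list used by qib_tid_update(), and
 * set up any requested subcontexts.
 */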
1306 static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
1307                       struct file *fp, const struct qib_user_info *uinfo)
1308 {
1309         struct qib_filedata *fd = fp->private_data;
1310         struct qib_devdata *dd = ppd->dd;
1311         struct qib_ctxtdata *rcd;
1312         void *ptmp = NULL;
1313         int ret;
1314         int numa_id;
1315
1316         assign_ctxt_affinity(fp, dd);
1317
1318         numa_id = qib_numa_aware ? ((fd->rec_cpu_num != -1) ?
1319                 cpu_to_node(fd->rec_cpu_num) :
1320                 numa_node_id()) : dd->assigned_node_id;
1321
1322         rcd = qib_create_ctxtdata(ppd, ctxt, numa_id);
1323
1324         /*
1325          * Allocate memory for use in qib_tid_update() at open to
1326          * reduce cost of expected send setup per message segment
1327          */
1328         if (rcd)
1329                 ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
1330                                dd->rcvtidcnt * sizeof(struct page **),
1331                                GFP_KERNEL);
1332
1333         if (!rcd || !ptmp) {
1334                 qib_dev_err(dd,
1335                         "Unable to allocate ctxtdata memory, failing open\n");
1336                 ret = -ENOMEM;
1337                 goto bailerr;
1338         }
1339         rcd->userversion = uinfo->spu_userversion;
1340         ret = init_subctxts(dd, rcd, uinfo);
1341         if (ret)
1342                 goto bailerr;
1343         rcd->tid_pg_list = ptmp;
1344         rcd->pid = current->pid;
1345         init_waitqueue_head(&dd->rcd[ctxt]->wait);
1346         strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
1347         ctxt_fp(fp) = rcd;
1348         qib_stats.sps_ctxts++;
1349         dd->freectxts--;
1350         ret = 0;
1351         goto bail;
1352
1353 bailerr:
1354         if (fd->rec_cpu_num != -1)
1355                 __clear_bit(fd->rec_cpu_num, qib_cpulist);
1356
1357         dd->rcd[ctxt] = NULL;
1358         kfree(rcd);
1359         kfree(ptmp);
1360 bail:
1361         return ret;
1362 }
1363
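/*
 * A port is usable only if its device is present and initialized and
 * the port has a LID with the link in the ACTIVE state.
 */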
1364 static inline int usable(struct qib_pportdata *ppd)
1365 {
1366         struct qib_devdata *dd = ppd->dd;
1367
1368         return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
1369                 (ppd->lflags & QIBL_LINKACTIVE);
1370 }
1371
1372 /*
1373  * Select a context on the given device, either using a requested port
1374  * or the port based on the context number.
1375  */
1376 static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
1377                             const struct qib_user_info *uinfo)
1378 {
1379         struct qib_pportdata *ppd = NULL;
1380         int ret, ctxt;
1381
1382         if (port) {
1383                 if (!usable(dd->pport + port - 1)) {
1384                         ret = -ENETDOWN;
1385                         goto done;
1386                 } else
1387                         ppd = dd->pport + port - 1;
1388         }
1389         for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
1390              ctxt++)
1391                 ;
1392         if (ctxt == dd->cfgctxts) {
1393                 ret = -EBUSY;
1394                 goto done;
1395         }
1396         if (!ppd) {
1397                 u32 pidx = ctxt % dd->num_pports;
1398
1399                 if (usable(dd->pport + pidx))
1400                         ppd = dd->pport + pidx;
1401                 else {
1402                         for (pidx = 0; pidx < dd->num_pports && !ppd;
1403                              pidx++)
1404                                 if (usable(dd->pport + pidx))
1405                                         ppd = dd->pport + pidx;
1406                 }
1407         }
1408         ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
1409 done:
1410         return ret;
1411 }
1412
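/*
 * Open a free user context on the requested unit, using the requested
 * port if one was specified.
 */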
1413 static int find_free_ctxt(int unit, struct file *fp,
1414                           const struct qib_user_info *uinfo)
1415 {
1416         struct qib_devdata *dd = qib_lookup(unit);
1417         int ret;
1418
1419         if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
1420                 ret = -ENODEV;
1421         else
1422                 ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);
1423
1424         return ret;
1425 }
1426
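/*
 * No specific unit was requested: pick one.  QIB_PORT_ALG_ACROSS chooses
 * the usable device with the fewest contexts in use; any other algorithm
 * simply takes the first device that can provide a context.
 */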
1427 static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
1428                       unsigned alg)
1429 {
1430         struct qib_devdata *udd = NULL;
1431         int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
1432         u32 port = uinfo->spu_port, ctxt;
1433
1434         devmax = qib_count_units(&npresent, &nup);
1435         if (!npresent) {
1436                 ret = -ENXIO;
1437                 goto done;
1438         }
1439         if (nup == 0) {
1440                 ret = -ENETDOWN;
1441                 goto done;
1442         }
1443
1444         if (alg == QIB_PORT_ALG_ACROSS) {
1445                 unsigned inuse = ~0U;
1446
1447                 /* find device (with ACTIVE ports) with fewest ctxts in use */
1448                 for (ndev = 0; ndev < devmax; ndev++) {
1449                         struct qib_devdata *dd = qib_lookup(ndev);
1450                         unsigned cused = 0, cfree = 0, pusable = 0;
1451
1452                         if (!dd)
1453                                 continue;
1454                         if (port && port <= dd->num_pports &&
1455                             usable(dd->pport + port - 1))
1456                                 pusable = 1;
1457                         else
1458                                 for (i = 0; i < dd->num_pports; i++)
1459                                         if (usable(dd->pport + i))
1460                                                 pusable++;
1461                         if (!pusable)
1462                                 continue;
1463                         for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
1464                              ctxt++)
1465                                 if (dd->rcd[ctxt])
1466                                         cused++;
1467                                 else
1468                                         cfree++;
1469                         if (cfree && cused < inuse) {
1470                                 udd = dd;
1471                                 inuse = cused;
1472                         }
1473                 }
1474                 if (udd) {
1475                         ret = choose_port_ctxt(fp, udd, port, uinfo);
1476                         goto done;
1477                 }
1478         } else {
1479                 for (ndev = 0; ndev < devmax; ndev++) {
1480                         struct qib_devdata *dd = qib_lookup(ndev);
1481
1482                         if (dd) {
1483                                 ret = choose_port_ctxt(fp, dd, port, uinfo);
1484                                 if (!ret)
1485                                         goto done;
1486                                 if (ret == -EBUSY)
1487                                         dusable++;
1488                         }
1489                 }
1490         }
1491         ret = dusable ? -EBUSY : -ENETDOWN;
1492
1493 done:
1494         return ret;
1495 }
1496
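/*
 * Look for an already-open master context with a matching subctxt_id that
 * still has room for another slave.  Returns 1 after attaching the caller
 * as a slave, 0 if no matching master exists, or -EINVAL if a match is
 * incompatible or already full.
 */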
1497 static int find_shared_ctxt(struct file *fp,
1498                             const struct qib_user_info *uinfo)
1499 {
1500         int devmax, ndev, i;
1501         int ret = 0;
1502
1503         devmax = qib_count_units(NULL, NULL);
1504
1505         for (ndev = 0; ndev < devmax; ndev++) {
1506                 struct qib_devdata *dd = qib_lookup(ndev);
1507
1508                 /* device portion of usable() */
1509                 if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
1510                         continue;
1511                 for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
1512                         struct qib_ctxtdata *rcd = dd->rcd[i];
1513
1514                         /* Skip ctxts which are not yet open */
1515                         if (!rcd || !rcd->cnt)
1516                                 continue;
1517                         /* Skip ctxt if it doesn't match the requested one */
1518                         if (rcd->subctxt_id != uinfo->spu_subctxt_id)
1519                                 continue;
1520                         /* Verify the sharing process matches the master */
1521                         if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
1522                             rcd->userversion != uinfo->spu_userversion ||
1523                             rcd->cnt >= rcd->subctxt_cnt) {
1524                                 ret = -EINVAL;
1525                                 goto done;
1526                         }
1527                         ctxt_fp(fp) = rcd;
1528                         subctxt_fp(fp) = rcd->cnt++;
1529                         rcd->subpid[subctxt_fp(fp)] = current->pid;
1530                         tidcursor_fp(fp) = 0;
1531                         rcd->active_slaves |= 1 << subctxt_fp(fp);
1532                         ret = 1;
1533                         goto done;
1534                 }
1535         }
1536
1537 done:
1538         return ret;
1539 }
1540
1541 static int qib_open(struct inode *in, struct file *fp)
1542 {
1543         /* The real work is performed later in qib_assign_ctxt() */
1544         fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
1545         if (fp->private_data) /* no cpu affinity by default */
1546                 ((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
1547         return fp->private_data ? 0 : -ENOMEM;
1548 }
1549
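/*
 * Find a unit whose PCI bus is on the same NUMA node as @cpu, for
 * affinity-based context selection; *unit is left at -1 if none matches.
 */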
1550 static int find_hca(unsigned int cpu, int *unit)
1551 {
1552         int ret = 0, devmax, npresent, nup, ndev;
1553
1554         *unit = -1;
1555
1556         devmax = qib_count_units(&npresent, &nup);
1557         if (!npresent) {
1558                 ret = -ENXIO;
1559                 goto done;
1560         }
1561         if (!nup) {
1562                 ret = -ENETDOWN;
1563                 goto done;
1564         }
1565         for (ndev = 0; ndev < devmax; ndev++) {
1566                 struct qib_devdata *dd = qib_lookup(ndev);
1567
1568                 if (dd) {
1569                         if (pcibus_to_node(dd->pcidev->bus) < 0) {
1570                                 ret = -EINVAL;
1571                                 goto done;
1572                         }
1573                         if (cpu_to_node(cpu) ==
1574                                 pcibus_to_node(dd->pcidev->bus)) {
1575                                 *unit = ndev;
1576                                 goto done;
1577                         }
1578                 }
1579         }
1580 done:
1581         return ret;
1582 }
1583
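/* Create this file's user SDMA queue, if the chip supports send DMA. */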
1584 static int do_qib_user_sdma_queue_create(struct file *fp)
1585 {
1586         struct qib_filedata *fd = fp->private_data;
1587         struct qib_ctxtdata *rcd = fd->rcd;
1588         struct qib_devdata *dd = rcd->dd;
1589
1590         if (dd->flags & QIB_HAS_SEND_DMA) {
1591
1592                 fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
1593                                                     dd->unit,
1594                                                     rcd->ctxt,
1595                                                     fd->subctxt);
1596                 if (!fd->pq)
1597                         return -ENOMEM;
1598         }
1599
1600         return 0;
1601 }
1602
1603 /*
1604  * Get the ctxt early, so we can set affinity prior to memory allocation.
1605  */
1606 static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
1607 {
1608         int ret;
1609         int i_minor;
1610         unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;
1611
1612         /* Check to be sure we haven't already initialized this file */
1613         if (ctxt_fp(fp)) {
1614                 ret = -EINVAL;
1615                 goto done;
1616         }
1617
1618         /* for now, if major version is different, bail */
1619         swmajor = uinfo->spu_userversion >> 16;
1620         if (swmajor != QIB_USER_SWMAJOR) {
1621                 ret = -ENODEV;
1622                 goto done;
1623         }
1624
1625         swminor = uinfo->spu_userversion & 0xffff;
1626
1627         if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
1628                 alg = uinfo->spu_port_alg;
1629
1630         mutex_lock(&qib_mutex);
1631
1632         if (qib_compatible_subctxts(swmajor, swminor) &&
1633             uinfo->spu_subctxt_cnt) {
1634                 ret = find_shared_ctxt(fp, uinfo);
1635                 if (ret > 0) {
1636                         ret = do_qib_user_sdma_queue_create(fp);
1637                         if (!ret)
1638                                 assign_ctxt_affinity(fp, (ctxt_fp(fp))->dd);
1639                         goto done_ok;
1640                 }
1641         }
1642
1643         i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE;
1644         if (i_minor)
1645                 ret = find_free_ctxt(i_minor - 1, fp, uinfo);
1646         else {
1647                 int unit;
1648                 const unsigned int cpu = cpumask_first(&current->cpus_allowed);
1649                 const unsigned int weight =
1650                         cpumask_weight(&current->cpus_allowed);
1651
1652                 if (weight == 1 && !test_bit(cpu, qib_cpulist))
1653                         if (!find_hca(cpu, &unit) && unit >= 0)
1654                                 if (!find_free_ctxt(unit, fp, uinfo)) {
1655                                         ret = 0;
1656                                         goto done_chk_sdma;
1657                                 }
1658                 ret = get_a_ctxt(fp, uinfo, alg);
1659         }
1660
1661 done_chk_sdma:
1662         if (!ret)
1663                 ret = do_qib_user_sdma_queue_create(fp);
1664 done_ok:
1665         mutex_unlock(&qib_mutex);
1666
1667 done:
1668         return ret;
1669 }
1670
1671
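/*
 * Second stage of context setup (QIB_CMD_USER_INIT): carve out the
 * context's PIO buffers, allocate the receive header queue and eager
 * buffers, and enable receive.  Slaves of a shared context just wait
 * here for the master to finish.
 */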
1672 static int qib_do_user_init(struct file *fp,
1673                             const struct qib_user_info *uinfo)
1674 {
1675         int ret;
1676         struct qib_ctxtdata *rcd = ctxt_fp(fp);
1677         struct qib_devdata *dd;
1678         unsigned uctxt;
1679
1680         /* Subctxts don't need to initialize anything since master did it. */
1681         if (subctxt_fp(fp)) {
1682                 ret = wait_event_interruptible(rcd->wait,
1683                         !test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
1684                 goto bail;
1685         }
1686
1687         dd = rcd->dd;
1688
1689         /* some ctxts may get extra buffers, calculate that here */
1690         uctxt = rcd->ctxt - dd->first_user_ctxt;
1691         if (uctxt < dd->ctxts_extrabuf) {
1692                 rcd->piocnt = dd->pbufsctxt + 1;
1693                 rcd->pio_base = rcd->piocnt * uctxt;
1694         } else {
1695                 rcd->piocnt = dd->pbufsctxt;
1696                 rcd->pio_base = rcd->piocnt * uctxt +
1697                         dd->ctxts_extrabuf;
1698         }
1699
1700         /*
1701          * All user buffers are 2KB buffers.  If we ever support
1702          * giving 4KB buffers to user processes, this will need some
1703          * work.  Can't use piobufbase directly, because it has
1704          * both 2K and 4K buffer base values.  So check and handle.
1705          */
1706         if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
1707                 if (rcd->pio_base >= dd->piobcnt2k) {
1708                         qib_dev_err(dd,
1709                                     "%u:ctxt%u: no 2KB buffers available\n",
1710                                     dd->unit, rcd->ctxt);
1711                         ret = -ENOBUFS;
1712                         goto bail;
1713                 }
1714                 rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
1715                 qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
1716                             rcd->ctxt, rcd->piocnt);
1717         }
1718
1719         rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
1720         qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
1721                                TXCHK_CHG_TYPE_USER, rcd);
1722         /*
1723          * Try to ensure that processes start up with a consistent PIO-avail
1724          * update for their own range, at least.  If the system is very quiet,
1725          * the in-memory copy may be out of date at startup for this range of
1726          * buffers when a context gets re-used.  Do this after the
1727          * chg_pioavailkernel call and before the rest of setup, so it's
1728          * almost certain the DMA will have occurred (it can't be guaranteed,
1729          * but it is highly likely with this ordering), given how much else
1730          * happens after this.
1731          */
1732         dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
1733
1734         /*
1735          * Now allocate the rcvhdr Q and eager TIDs; skip the TID
1736          * array for the time being.  If rcd->ctxt exceeds what the chip
1737          * supports, we will someday need extra logic here to handle the
1738          * overflow through ctxt 0.
1739          */
1740         ret = qib_create_rcvhdrq(dd, rcd);
1741         if (!ret)
1742                 ret = qib_setup_eagerbufs(rcd);
1743         if (ret)
1744                 goto bail_pio;
1745
1746         rcd->tidcursor = 0; /* start at beginning after open */
1747
1748         /* initialize poll variables... */
1749         rcd->urgent = 0;
1750         rcd->urgent_poll = 0;
1751
1752         /*
1753          * Now enable the ctxt for receive.
1754          * For chips configured to DMA the tail register to memory when
1755          * it changes (and when the update bit transitions from 0 to 1),
1756          * we turn the feature off and then back on.  This very briefly
1757          * affects any other open ctxts, but the duration is short enough
1758          * that it isn't an issue.  We
1759          * explicitly set the in-memory tail copy to 0 beforehand, so we
1760          * don't have to wait to be sure the DMA update has happened
1761          * (chip resets head/tail to 0 on transition to enable).
1762          */
1763         if (rcd->rcvhdrtail_kvaddr)
1764                 qib_clear_rcvhdrtail(rcd);
1765
1766         dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
1767                       rcd->ctxt);
1768
1769         /* Notify any waiting slaves */
1770         if (rcd->subctxt_cnt) {
1771                 clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
1772                 wake_up(&rcd->wait);
1773         }
1774         return 0;
1775
1776 bail_pio:
1777         qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
1778                                TXCHK_CHG_TYPE_KERN, rcd);
1779 bail:
1780         return ret;
1781 }
1782
1783 /**
1784  * unlock_expected_tids - unlock any expected TID entries the context still had in use
1785  * @rcd: ctxt
1786  *
1787  * We don't actually update the chip here, because the caller does a bulk
1788  * update afterward, using f_clear_tids.
1789  */
1790 static void unlock_expected_tids(struct qib_ctxtdata *rcd)
1791 {
1792         struct qib_devdata *dd = rcd->dd;
1793         int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
1794         int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;
1795
1796         for (i = ctxt_tidbase; i < maxtid; i++) {
1797                 struct page *p = dd->pageshadow[i];
1798                 dma_addr_t phys;
1799
1800                 if (!p)
1801                         continue;
1802
1803                 phys = dd->physshadow[i];
1804                 dd->physshadow[i] = dd->tidinvalid;
1805                 dd->pageshadow[i] = NULL;
1806                 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
1807                                PCI_DMA_FROMDEVICE);
1808                 qib_release_user_pages(&p, 1);
1809                 cnt++;
1810         }
1811 }
1812
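/*
 * Release a user context (or a slave's share of one): drain the user
 * SDMA queue, and if this was the last reference, disable receive,
 * disarm PIO buffers, release expected TIDs, and free the ctxtdata.
 */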
1813 static int qib_close(struct inode *in, struct file *fp)
1814 {
1815         int ret = 0;
1816         struct qib_filedata *fd;
1817         struct qib_ctxtdata *rcd;
1818         struct qib_devdata *dd;
1819         unsigned long flags;
1820         unsigned ctxt;
1821         pid_t pid;
1822
1823         mutex_lock(&qib_mutex);
1824
1825         fd = fp->private_data;
1826         fp->private_data = NULL;
1827         rcd = fd->rcd;
1828         if (!rcd) {
1829                 mutex_unlock(&qib_mutex);
1830                 goto bail;
1831         }
1832
1833         dd = rcd->dd;
1834
1835         /* ensure all pio buffer writes in progress are flushed */
1836         qib_flush_wc();
1837
1838         /* drain user sdma queue */
1839         if (fd->pq) {
1840                 qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
1841                 qib_user_sdma_queue_destroy(fd->pq);
1842         }
1843
1844         if (fd->rec_cpu_num != -1)
1845                 __clear_bit(fd->rec_cpu_num, qib_cpulist);
1846
1847         if (--rcd->cnt) {
1848                 /*
1849                  * XXX If the master closes the context before the slave(s),
1850                  * revoke the mmap for the eager receive queue so
1851                  * the slave(s) don't wait for receive data forever.
1852                  */
1853                 rcd->active_slaves &= ~(1 << fd->subctxt);
1854                 rcd->subpid[fd->subctxt] = 0;
1855                 mutex_unlock(&qib_mutex);
1856                 goto bail;
1857         }
1858
1859         /* early; no interrupt users after this */
1860         spin_lock_irqsave(&dd->uctxt_lock, flags);
1861         ctxt = rcd->ctxt;
1862         dd->rcd[ctxt] = NULL;
1863         pid = rcd->pid;
1864         rcd->pid = 0;
1865         spin_unlock_irqrestore(&dd->uctxt_lock, flags);
1866
1867         if (rcd->rcvwait_to || rcd->piowait_to ||
1868             rcd->rcvnowait || rcd->pionowait) {
1869                 rcd->rcvwait_to = 0;
1870                 rcd->piowait_to = 0;
1871                 rcd->rcvnowait = 0;
1872                 rcd->pionowait = 0;
1873         }
1874         if (rcd->flag)
1875                 rcd->flag = 0;
1876
1877         if (dd->kregbase) {
1878                 /* atomically clear receive enable ctxt and intr avail. */
1879                 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
1880                                   QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);
1881
1882                 /* clean up the pkeys for this ctxt user */
1883                 qib_clean_part_key(rcd, dd);
1884                 qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
1885                 qib_chg_pioavailkernel(dd, rcd->pio_base,
1886                                        rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);
1887
1888                 dd->f_clear_tids(dd, rcd);
1889
1890                 if (dd->pageshadow)
1891                         unlock_expected_tids(rcd);
1892                 qib_stats.sps_ctxts--;
1893                 dd->freectxts++;
1894         }
1895
1896         mutex_unlock(&qib_mutex);
1897         qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */
1898
1899 bail:
1900         kfree(fd);
1901         return ret;
1902 }
1903
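/*
 * Copy a snapshot of this context's identity (unit, port, ctxt, subctxt,
 * counts) out to userspace for QIB_CMD_CTXT_INFO.
 */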
1904 static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
1905 {
1906         struct qib_ctxt_info info;
1907         int ret;
1908         size_t sz;
1909         struct qib_ctxtdata *rcd = ctxt_fp(fp);
1910         struct qib_filedata *fd;
1911
1912         fd = fp->private_data;
1913
1914         info.num_active = qib_count_active_units();
1915         info.unit = rcd->dd->unit;
1916         info.port = rcd->ppd->port;
1917         info.ctxt = rcd->ctxt;
1918         info.subctxt =  subctxt_fp(fp);
1919         /* Number of user ctxts available for this device. */
1920         info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
1921         info.num_subctxts = rcd->subctxt_cnt;
1922         info.rec_cpu = fd->rec_cpu_num;
1923         sz = sizeof(info);
1924
1925         if (copy_to_user(uinfo, &info, sz)) {
1926                 ret = -EFAULT;
1927                 goto bail;
1928         }
1929         ret = 0;
1930
1931 bail:
1932         return ret;
1933 }
1934
1935 static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
1936                                  u32 __user *inflightp)
1937 {
1938         const u32 val = qib_user_sdma_inflight_counter(pq);
1939
1940         if (put_user(val, inflightp))
1941                 return -EFAULT;
1942
1943         return 0;
1944 }
1945
1946 static int qib_sdma_get_complete(struct qib_pportdata *ppd,
1947                                  struct qib_user_sdma_queue *pq,
1948                                  u32 __user *completep)
1949 {
1950         u32 val;
1951         int err;
1952
1953         if (!pq)
1954                 return -EINVAL;
1955
1956         err = qib_user_sdma_make_progress(ppd, pq);
1957         if (err < 0)
1958                 return err;
1959
1960         val = qib_user_sdma_complete_counter(pq);
1961         if (put_user(val, completep))
1962                 return -EFAULT;
1963
1964         return 0;
1965 }
1966
1967 static int disarm_req_delay(struct qib_ctxtdata *rcd)
1968 {
1969         int ret = 0;
1970
1971         if (!usable(rcd->ppd)) {
1972                 int i;
1973                 /*
1974                  * If the link is down, or otherwise not usable, delay
1975                  * the caller up to 30 seconds so we don't thrash trying
1976                  * to get the chip back to ACTIVE, and set a flag so the
1977                  * caller will retry the request.
1978                  */
1979                 if (rcd->user_event_mask) {
1980                         /*
1981                          * subctxt_cnt is 0 if not shared, so do base
1982                          * separately, first, then remaining subctxt, if any
1983                          */
1984                         set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
1985                                 &rcd->user_event_mask[0]);
1986                         for (i = 1; i < rcd->subctxt_cnt; i++)
1987                                 set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
1988                                         &rcd->user_event_mask[i]);
1989                 }
1990                 for (i = 0; !usable(rcd->ppd) && i < 300; i++)
1991                         msleep(100);
1992                 ret = -ENETDOWN;
1993         }
1994         return ret;
1995 }
1996
1997 /*
1998  * Find all user contexts in use, and set the specified bit in their
1999  * event mask.
2000  * See also find_ctxt() for a similar use that is specific to send buffers.
2001  */
2002 int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
2003 {
2004         struct qib_ctxtdata *rcd;
2005         unsigned ctxt;
2006         int ret = 0;
2007         unsigned long flags;
2008
2009         spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
2010         for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
2011              ctxt++) {
2012                 rcd = ppd->dd->rcd[ctxt];
2013                 if (!rcd)
2014                         continue;
2015                 if (rcd->user_event_mask) {
2016                         int i;
2017                         /*
2018                          * subctxt_cnt is 0 if not shared, so do base
2019                          * separately, first, then remaining subctxt, if any
2020                          */
2021                         set_bit(evtbit, &rcd->user_event_mask[0]);
2022                         for (i = 1; i < rcd->subctxt_cnt; i++)
2023                                 set_bit(evtbit, &rcd->user_event_mask[i]);
2024                 }
2025                 ret = 1;
2026                 break;
2027         }
2028         spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);
2029
2030         return ret;
2031 }
2032
2033 /*
2034  * Clear the event notifier events for this context.
2035  * For the DISARM_BUFS case, we also take action (this obsoletes
2036  * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
2037  * compatibility).
2038  * Other bits don't currently require any action; we just clear them
2039  * atomically.  The user process then performs whatever actions are
2040  * appropriate for the bits that were set, if desired, and checks again later.
2041  */
2042 static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
2043                               unsigned long events)
2044 {
2045         int ret = 0, i;
2046
2047         for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
2048                 if (!test_bit(i, &events))
2049                         continue;
2050                 if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
2051                         (void)qib_disarm_piobufs_ifneeded(rcd);
2052                         ret = disarm_req_delay(rcd);
2053                 } else
2054                         clear_bit(i, &rcd->user_event_mask[subctxt]);
2055         }
2056         return ret;
2057 }
2058
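/*
 * Driver command ABI: userspace writes a struct qib_cmd; only the union
 * member matching cmd.type is copied in before the command is dispatched.
 * On success the number of bytes consumed is returned.
 */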
2059 static ssize_t qib_write(struct file *fp, const char __user *data,
2060                          size_t count, loff_t *off)
2061 {
2062         const struct qib_cmd __user *ucmd;
2063         struct qib_ctxtdata *rcd;
2064         const void __user *src;
2065         size_t consumed, copy = 0;
2066         struct qib_cmd cmd;
2067         ssize_t ret = 0;
2068         void *dest;
2069
2070         if (count < sizeof(cmd.type)) {
2071                 ret = -EINVAL;
2072                 goto bail;
2073         }
2074
2075         ucmd = (const struct qib_cmd __user *) data;
2076
2077         if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
2078                 ret = -EFAULT;
2079                 goto bail;
2080         }
2081
2082         consumed = sizeof(cmd.type);
2083
2084         switch (cmd.type) {
2085         case QIB_CMD_ASSIGN_CTXT:
2086         case QIB_CMD_USER_INIT:
2087                 copy = sizeof(cmd.cmd.user_info);
2088                 dest = &cmd.cmd.user_info;
2089                 src = &ucmd->cmd.user_info;
2090                 break;
2091
2092         case QIB_CMD_RECV_CTRL:
2093                 copy = sizeof(cmd.cmd.recv_ctrl);
2094                 dest = &cmd.cmd.recv_ctrl;
2095                 src = &ucmd->cmd.recv_ctrl;
2096                 break;
2097
2098         case QIB_CMD_CTXT_INFO:
2099                 copy = sizeof(cmd.cmd.ctxt_info);
2100                 dest = &cmd.cmd.ctxt_info;
2101                 src = &ucmd->cmd.ctxt_info;
2102                 break;
2103
2104         case QIB_CMD_TID_UPDATE:
2105         case QIB_CMD_TID_FREE:
2106                 copy = sizeof(cmd.cmd.tid_info);
2107                 dest = &cmd.cmd.tid_info;
2108                 src = &ucmd->cmd.tid_info;
2109                 break;
2110
2111         case QIB_CMD_SET_PART_KEY:
2112                 copy = sizeof(cmd.cmd.part_key);
2113                 dest = &cmd.cmd.part_key;
2114                 src = &ucmd->cmd.part_key;
2115                 break;
2116
2117         case QIB_CMD_DISARM_BUFS:
2118         case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
2119                 copy = 0;
2120                 src = NULL;
2121                 dest = NULL;
2122                 break;
2123
2124         case QIB_CMD_POLL_TYPE:
2125                 copy = sizeof(cmd.cmd.poll_type);
2126                 dest = &cmd.cmd.poll_type;
2127                 src = &ucmd->cmd.poll_type;
2128                 break;
2129
2130         case QIB_CMD_ARMLAUNCH_CTRL:
2131                 copy = sizeof(cmd.cmd.armlaunch_ctrl);
2132                 dest = &cmd.cmd.armlaunch_ctrl;
2133                 src = &ucmd->cmd.armlaunch_ctrl;
2134                 break;
2135
2136         case QIB_CMD_SDMA_INFLIGHT:
2137                 copy = sizeof(cmd.cmd.sdma_inflight);
2138                 dest = &cmd.cmd.sdma_inflight;
2139                 src = &ucmd->cmd.sdma_inflight;
2140                 break;
2141
2142         case QIB_CMD_SDMA_COMPLETE:
2143                 copy = sizeof(cmd.cmd.sdma_complete);
2144                 dest = &cmd.cmd.sdma_complete;
2145                 src = &ucmd->cmd.sdma_complete;
2146                 break;
2147
2148         case QIB_CMD_ACK_EVENT:
2149                 copy = sizeof(cmd.cmd.event_mask);
2150                 dest = &cmd.cmd.event_mask;
2151                 src = &ucmd->cmd.event_mask;
2152                 break;
2153
2154         default:
2155                 ret = -EINVAL;
2156                 goto bail;
2157         }
2158
2159         if (copy) {
2160                 if ((count - consumed) < copy) {
2161                         ret = -EINVAL;
2162                         goto bail;
2163                 }
2164                 if (copy_from_user(dest, src, copy)) {
2165                         ret = -EFAULT;
2166                         goto bail;
2167                 }
2168                 consumed += copy;
2169         }
2170
2171         rcd = ctxt_fp(fp);
2172         if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
2173                 ret = -EINVAL;
2174                 goto bail;
2175         }
2176
2177         switch (cmd.type) {
2178         case QIB_CMD_ASSIGN_CTXT:
2179                 ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
2180                 if (ret)
2181                         goto bail;
2182                 break;
2183
2184         case QIB_CMD_USER_INIT:
2185                 ret = qib_do_user_init(fp, &cmd.cmd.user_info);
2186                 if (ret)
2187                         goto bail;
2188                 ret = qib_get_base_info(fp, (void __user *) (unsigned long)
2189                                         cmd.cmd.user_info.spu_base_info,
2190                                         cmd.cmd.user_info.spu_base_info_size);
2191                 break;
2192
2193         case QIB_CMD_RECV_CTRL:
2194                 ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
2195                 break;
2196
2197         case QIB_CMD_CTXT_INFO:
2198                 ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
2199                                     (unsigned long) cmd.cmd.ctxt_info);
2200                 break;
2201
2202         case QIB_CMD_TID_UPDATE:
2203                 ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
2204                 break;
2205
2206         case QIB_CMD_TID_FREE:
2207                 ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
2208                 break;
2209
2210         case QIB_CMD_SET_PART_KEY:
2211                 ret = qib_set_part_key(rcd, cmd.cmd.part_key);
2212                 break;
2213
2214         case QIB_CMD_DISARM_BUFS:
2215                 (void)qib_disarm_piobufs_ifneeded(rcd);
2216                 ret = disarm_req_delay(rcd);
2217                 break;
2218
2219         case QIB_CMD_PIOAVAILUPD:
2220                 qib_force_pio_avail_update(rcd->dd);
2221                 break;
2222
2223         case QIB_CMD_POLL_TYPE:
2224                 rcd->poll_type = cmd.cmd.poll_type;
2225                 break;
2226
2227         case QIB_CMD_ARMLAUNCH_CTRL:
2228                 rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
2229                 break;
2230
2231         case QIB_CMD_SDMA_INFLIGHT:
2232                 ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
2233                                             (u32 __user *) (unsigned long)
2234                                             cmd.cmd.sdma_inflight);
2235                 break;
2236
2237         case QIB_CMD_SDMA_COMPLETE:
2238                 ret = qib_sdma_get_complete(rcd->ppd,
2239                                             user_sdma_queue_fp(fp),
2240                                             (u32 __user *) (unsigned long)
2241                                             cmd.cmd.sdma_complete);
2242                 break;
2243
2244         case QIB_CMD_ACK_EVENT:
2245                 ret = qib_user_event_ack(rcd, subctxt_fp(fp),
2246                                          cmd.cmd.event_mask);
2247                 break;
2248         }
2249
2250         if (ret >= 0)
2251                 ret = consumed;
2252
2253 bail:
2254         return ret;
2255 }
2256
2257 static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
2258 {
2259         struct qib_filedata *fp = iocb->ki_filp->private_data;
2260         struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
2261         struct qib_user_sdma_queue *pq = fp->pq;
2262
2263         if (!iter_is_iovec(from) || !from->nr_segs || !pq)
2264                 return -EINVAL;
2265
2266         return qib_user_sdma_writev(rcd, pq, from->iov, from->nr_segs);
2267 }
2268
2269 static struct class *qib_class;
2270 static dev_t qib_dev;
2271
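/*
 * Allocate a cdev, register it at the given minor, and create the
 * matching device node under the qib class.
 */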
2272 int qib_cdev_init(int minor, const char *name,
2273                   const struct file_operations *fops,
2274                   struct cdev **cdevp, struct device **devp)
2275 {
2276         const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
2277         struct cdev *cdev;
2278         struct device *device = NULL;
2279         int ret;
2280
2281         cdev = cdev_alloc();
2282         if (!cdev) {
2283                 pr_err("Could not allocate cdev for minor %d, %s\n",
2284                        minor, name);
2285                 ret = -ENOMEM;
2286                 goto done;
2287         }
2288
2289         cdev->owner = THIS_MODULE;
2290         cdev->ops = fops;
2291         kobject_set_name(&cdev->kobj, name);
2292
2293         ret = cdev_add(cdev, dev, 1);
2294         if (ret < 0) {
2295                 pr_err("Could not add cdev for minor %d, %s (err %d)\n",
2296                        minor, name, -ret);
2297                 goto err_cdev;
2298         }
2299
2300         device = device_create(qib_class, NULL, dev, NULL, "%s", name);
2301         if (!IS_ERR(device))
2302                 goto done;
2303         ret = PTR_ERR(device);
2304         device = NULL;
2305         pr_err("Could not create device for minor %d, %s (err %d)\n",
2306                minor, name, -ret);
2307 err_cdev:
2308         cdev_del(cdev);
2309         cdev = NULL;
2310 done:
2311         *cdevp = cdev;
2312         *devp = device;
2313         return ret;
2314 }
2315
2316 void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
2317 {
2318         struct device *device = *devp;
2319
2320         if (device) {
2321                 device_unregister(device);
2322                 *devp = NULL;
2323         }
2324
2325         if (*cdevp) {
2326                 cdev_del(*cdevp);
2327                 *cdevp = NULL;
2328         }
2329 }
2330
2331 static struct cdev *wildcard_cdev;
2332 static struct device *wildcard_device;
2333
2334 int __init qib_dev_init(void)
2335 {
2336         int ret;
2337
2338         ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
2339         if (ret < 0) {
2340                 pr_err("Could not allocate chrdev region (err %d)\n", -ret);
2341                 goto done;
2342         }
2343
2344         qib_class = class_create(THIS_MODULE, "ipath");
2345         if (IS_ERR(qib_class)) {
2346                 ret = PTR_ERR(qib_class);
2347                 pr_err("Could not create device class (err %d)\n", -ret);
2348                 unregister_chrdev_region(qib_dev, QIB_NMINORS);
2349         }
2350
2351 done:
2352         return ret;
2353 }
2354
2355 void qib_dev_cleanup(void)
2356 {
2357         if (qib_class) {
2358                 class_destroy(qib_class);
2359                 qib_class = NULL;
2360         }
2361
2362         unregister_chrdev_region(qib_dev, QIB_NMINORS);
2363 }
2364
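/*
 * user_count tracks how many units have user device nodes; the first
 * qib_user_add() also creates the wildcard "ipath" node and the last
 * qib_user_remove() tears it down.
 */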
2365 static atomic_t user_count = ATOMIC_INIT(0);
2366
2367 static void qib_user_remove(struct qib_devdata *dd)
2368 {
2369         if (atomic_dec_return(&user_count) == 0)
2370                 qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);
2371
2372         qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
2373 }
2374
2375 static int qib_user_add(struct qib_devdata *dd)
2376 {
2377         char name[10];
2378         int ret;
2379
2380         if (atomic_inc_return(&user_count) == 1) {
2381                 ret = qib_cdev_init(0, "ipath", &qib_file_ops,
2382                                     &wildcard_cdev, &wildcard_device);
2383                 if (ret)
2384                         goto done;
2385         }
2386
2387         snprintf(name, sizeof(name), "ipath%d", dd->unit);
2388         ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
2389                             &dd->user_cdev, &dd->user_device);
2390         if (ret)
2391                 qib_user_remove(dd);
2392 done:
2393         return ret;
2394 }
2395
2396 /*
2397  * Create per-unit files in /dev
2398  */
2399 int qib_device_create(struct qib_devdata *dd)
2400 {
2401         int r, ret;
2402
2403         r = qib_user_add(dd);
2404         ret = qib_diag_add(dd);
2405         if (r && !ret)
2406                 ret = r;
2407         return ret;
2408 }
2409
2410 /*
2411  * Remove per-unit files in /dev
2412  * This is void because the core kernel removal paths report no errors.
2413  */
2414 void qib_device_remove(struct qib_devdata *dd)
2415 {
2416         qib_user_remove(dd);
2417         qib_diag_remove(dd);
2418 }