1 /*
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2015 Intel Corporation.
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of version 2 of the GNU General Public License as
12  * published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful, but
15  * WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * General Public License for more details.
18  *
19  * BSD LICENSE
20  *
21  * Copyright(c) 2015 Intel Corporation.
22  *
23  * Redistribution and use in source and binary forms, with or without
24  * modification, are permitted provided that the following conditions
25  * are met:
26  *
27  *  - Redistributions of source code must retain the above copyright
28  *    notice, this list of conditions and the following disclaimer.
29  *  - Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in
31  *    the documentation and/or other materials provided with the
32  *    distribution.
33  *  - Neither the name of Intel Corporation nor the names of its
34  *    contributors may be used to endorse or promote products derived
35  *    from this software without specific prior written permission.
36  *
37  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48  *
49  */
50 #include <linux/pci.h>
51 #include <linux/poll.h>
52 #include <linux/cdev.h>
53 #include <linux/swap.h>
54 #include <linux/vmalloc.h>
55 #include <linux/highmem.h>
56 #include <linux/io.h>
57 #include <linux/jiffies.h>
58 #include <asm/pgtable.h>
59 #include <linux/delay.h>
60 #include <linux/export.h>
61 #include <linux/module.h>
62 #include <linux/cred.h>
63 #include <linux/uio.h>
64
65 #include "hfi.h"
66 #include "pio.h"
67 #include "device.h"
68 #include "common.h"
69 #include "trace.h"
70 #include "user_sdma.h"
71 #include "eprom.h"
72
73 #undef pr_fmt
74 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
75
76 #define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */
77
78 /*
79  * File operation functions
80  */
81 static int hfi1_file_open(struct inode *, struct file *);
82 static int hfi1_file_close(struct inode *, struct file *);
83 static ssize_t hfi1_file_write(struct file *, const char __user *,
84                                size_t, loff_t *);
85 static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
86 static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
87 static int hfi1_file_mmap(struct file *, struct vm_area_struct *);
88
89 static u64 kvirt_to_phys(void *);
90 static int assign_ctxt(struct file *, struct hfi1_user_info *);
91 static int init_subctxts(struct hfi1_ctxtdata *, const struct hfi1_user_info *);
92 static int user_init(struct file *);
93 static int get_ctxt_info(struct file *, void __user *, __u32);
94 static int get_base_info(struct file *, void __user *, __u32);
95 static int setup_ctxt(struct file *);
96 static int setup_subctxt(struct hfi1_ctxtdata *);
97 static int get_user_context(struct file *, struct hfi1_user_info *,
98                             int, unsigned);
99 static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
100 static int allocate_ctxt(struct file *, struct hfi1_devdata *,
101                          struct hfi1_user_info *);
102 static unsigned int poll_urgent(struct file *, struct poll_table_struct *);
103 static unsigned int poll_next(struct file *, struct poll_table_struct *);
104 static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
105 static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
106 static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
107 static int vma_fault(struct vm_area_struct *, struct vm_fault *);
108 static int exp_tid_setup(struct file *, struct hfi1_tid_info *);
109 static int exp_tid_free(struct file *, struct hfi1_tid_info *);
110 static void unlock_exp_tids(struct hfi1_ctxtdata *);
111
112 static const struct file_operations hfi1_file_ops = {
113         .owner = THIS_MODULE,
114         .write = hfi1_file_write,
115         .write_iter = hfi1_write_iter,
116         .open = hfi1_file_open,
117         .release = hfi1_file_close,
118         .poll = hfi1_poll,
119         .mmap = hfi1_file_mmap,
120         .llseek = noop_llseek,
121 };
122
123 static struct vm_operations_struct vm_ops = {
124         .fault = vma_fault,
125 };
126
127 /*
128  * Types of memory mapped into user processes' space
129  */
130 enum mmap_types {
131         PIO_BUFS = 1,
132         PIO_BUFS_SOP,
133         PIO_CRED,
134         RCV_HDRQ,
135         RCV_EGRBUF,
136         UREGS,
137         EVENTS,
138         STATUS,
139         RTAIL,
140         SUBCTXT_UREGS,
141         SUBCTXT_RCV_HDRQ,
142         SUBCTXT_EGRBUF,
143         SDMA_COMP
144 };
145
146 /*
147  * Masks and offsets defining the mmap tokens
148  */
149 #define HFI1_MMAP_OFFSET_MASK   0xfffULL
150 #define HFI1_MMAP_OFFSET_SHIFT  0
151 #define HFI1_MMAP_SUBCTXT_MASK  0xfULL
152 #define HFI1_MMAP_SUBCTXT_SHIFT 12
153 #define HFI1_MMAP_CTXT_MASK     0xffULL
154 #define HFI1_MMAP_CTXT_SHIFT    16
155 #define HFI1_MMAP_TYPE_MASK     0xfULL
156 #define HFI1_MMAP_TYPE_SHIFT    24
157 #define HFI1_MMAP_MAGIC_MASK    0xffffffffULL
158 #define HFI1_MMAP_MAGIC_SHIFT   32
159
160 #define HFI1_MMAP_MAGIC         0xdabbad00
161
162 #define HFI1_MMAP_TOKEN_SET(field, val) \
163         (((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
164 #define HFI1_MMAP_TOKEN_GET(field, token) \
165         (((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
166 #define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr)   \
167         (HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
168         HFI1_MMAP_TOKEN_SET(TYPE, type) | \
169         HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
170         HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
171         HFI1_MMAP_TOKEN_SET(OFFSET, ((unsigned long)addr & ~PAGE_MASK)))
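/*
 * Token layout implied by the masks and shifts above (for reference):
 *   bits [63:32] magic   - must equal HFI1_MMAP_MAGIC
 *   bits [27:24] type    - one of enum mmap_types
 *   bits [23:16] ctxt
 *   bits [15:12] subctxt
 *   bits [11:0]  offset  - offset of the mapped object within its page
 */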
172
173 #define EXP_TID_SET(field, value)                       \
174         (((value) & EXP_TID_TID##field##_MASK) <<       \
175          EXP_TID_TID##field##_SHIFT)
176 #define EXP_TID_CLEAR(tid, field) {                                     \
177                 (tid) &= ~(EXP_TID_TID##field##_MASK <<                 \
178                            EXP_TID_TID##field##_SHIFT);                 \
179                         }
180 #define EXP_TID_RESET(tid, field, value) do {                           \
181                 EXP_TID_CLEAR(tid, field);                              \
182                 (tid) |= EXP_TID_SET(field, value);                     \
183         } while (0)
184
185 #define dbg(fmt, ...)                           \
186         pr_info(fmt, ##__VA_ARGS__)
187
188
189 static inline int is_valid_mmap(u64 token)
190 {
191         return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
192 }
193
194 static int hfi1_file_open(struct inode *inode, struct file *fp)
195 {
196         /* The real work is performed later in assign_ctxt() */
197         fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
198         if (fp->private_data) /* no cpu affinity by default */
199                 ((struct hfi1_filedata *)fp->private_data)->rec_cpu_num = -1;
200         return fp->private_data ? 0 : -ENOMEM;
201 }
202
203 static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
204                                size_t count, loff_t *offset)
205 {
206         const struct hfi1_cmd __user *ucmd;
207         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
208         struct hfi1_cmd cmd;
209         struct hfi1_user_info uinfo;
210         struct hfi1_tid_info tinfo;
211         ssize_t consumed = 0, copy = 0, ret = 0;
212         void *dest = NULL;
213         __u64 user_val = 0;
214         int uctxt_required = 1;
215         int must_be_root = 0;
216
217         if (count < sizeof(cmd)) {
218                 ret = -EINVAL;
219                 goto bail;
220         }
221
222         ucmd = (const struct hfi1_cmd __user *)data;
223         if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
224                 ret = -EFAULT;
225                 goto bail;
226         }
227
228         consumed = sizeof(cmd);
229
230         switch (cmd.type) {
231         case HFI1_CMD_ASSIGN_CTXT:
232                 uctxt_required = 0;     /* assigned user context not required */
233                 copy = sizeof(uinfo);
234                 dest = &uinfo;
235                 break;
236         case HFI1_CMD_SDMA_STATUS_UPD:
237         case HFI1_CMD_CREDIT_UPD:
238                 copy = 0;
239                 break;
240         case HFI1_CMD_TID_UPDATE:
241         case HFI1_CMD_TID_FREE:
242                 copy = sizeof(tinfo);
243                 dest = &tinfo;
244                 break;
245         case HFI1_CMD_USER_INFO:
246         case HFI1_CMD_RECV_CTRL:
247         case HFI1_CMD_POLL_TYPE:
248         case HFI1_CMD_ACK_EVENT:
249         case HFI1_CMD_CTXT_INFO:
250         case HFI1_CMD_SET_PKEY:
251         case HFI1_CMD_CTXT_RESET:
252                 copy = 0;
253                 user_val = cmd.addr;
254                 break;
255         case HFI1_CMD_EP_INFO:
256         case HFI1_CMD_EP_ERASE_CHIP:
257         case HFI1_CMD_EP_ERASE_P0:
258         case HFI1_CMD_EP_ERASE_P1:
259         case HFI1_CMD_EP_READ_P0:
260         case HFI1_CMD_EP_READ_P1:
261         case HFI1_CMD_EP_WRITE_P0:
262         case HFI1_CMD_EP_WRITE_P1:
263                 uctxt_required = 0;     /* assigned user context not required */
264                 must_be_root = 1;       /* validate user */
265                 copy = 0;
266                 break;
267         default:
268                 ret = -EINVAL;
269                 goto bail;
270         }
271
272         /* If the command comes with user data, copy it. */
273         if (copy) {
274                 if (copy_from_user(dest, (void __user *)cmd.addr, copy)) {
275                         ret = -EFAULT;
276                         goto bail;
277                 }
278                 consumed += copy;
279         }
280
281         /*
282          * Make sure there is a uctxt when needed.
283          */
284         if (uctxt_required && !uctxt) {
285                 ret = -EINVAL;
286                 goto bail;
287         }
288
289         /* only root can do these operations */
290         if (must_be_root && !capable(CAP_SYS_ADMIN)) {
291                 ret = -EPERM;
292                 goto bail;
293         }
294
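        /*
         * Argument copying and permission checks are complete; dispatch
         * the requested command.
         */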
295         switch (cmd.type) {
296         case HFI1_CMD_ASSIGN_CTXT:
297                 ret = assign_ctxt(fp, &uinfo);
298                 if (ret < 0)
299                         goto bail;
300                 ret = setup_ctxt(fp);
301                 if (ret)
302                         goto bail;
303                 ret = user_init(fp);
304                 break;
305         case HFI1_CMD_CTXT_INFO:
306                 ret = get_ctxt_info(fp, (void __user *)(unsigned long)
307                                     user_val, cmd.len);
308                 break;
309         case HFI1_CMD_USER_INFO:
310                 ret = get_base_info(fp, (void __user *)(unsigned long)
311                                     user_val, cmd.len);
312                 break;
313         case HFI1_CMD_SDMA_STATUS_UPD:
314                 break;
315         case HFI1_CMD_CREDIT_UPD:
316                 if (uctxt && uctxt->sc)
317                         sc_return_credits(uctxt->sc);
318                 break;
319         case HFI1_CMD_TID_UPDATE:
320                 ret = exp_tid_setup(fp, &tinfo);
321                 if (!ret) {
322                         unsigned long addr;
323                         /*
324                          * Copy the number of tidlist entries we used
325                          * and the length of the buffer we registered.
326                          * These fields are adjacent in the structure so
327                          * we can copy them at the same time.
328                          */
329                         addr = (unsigned long)cmd.addr +
330                                 offsetof(struct hfi1_tid_info, tidcnt);
331                         if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
332                                          sizeof(tinfo.tidcnt) +
333                                          sizeof(tinfo.length)))
334                                 ret = -EFAULT;
335                 }
336                 break;
337         case HFI1_CMD_TID_FREE:
338                 ret = exp_tid_free(fp, &tinfo);
339                 break;
340         case HFI1_CMD_RECV_CTRL:
341                 ret = manage_rcvq(uctxt, subctxt_fp(fp), (int)user_val);
342                 break;
343         case HFI1_CMD_POLL_TYPE:
344                 uctxt->poll_type = (typeof(uctxt->poll_type))user_val;
345                 break;
346         case HFI1_CMD_ACK_EVENT:
347                 ret = user_event_ack(uctxt, subctxt_fp(fp), user_val);
348                 break;
349         case HFI1_CMD_SET_PKEY:
350                 if (HFI1_CAP_IS_USET(PKEY_CHECK))
351                         ret = set_ctxt_pkey(uctxt, subctxt_fp(fp), user_val);
352                 else
353                         ret = -EPERM;
354                 break;
355         case HFI1_CMD_CTXT_RESET: {
356                 struct send_context *sc;
357                 struct hfi1_devdata *dd;
358
359                 if (!uctxt || !uctxt->dd || !uctxt->sc) {
360                         ret = -EINVAL;
361                         break;
362                 }
363                 /*
364                  * There is no protection here. User level has to
365                  * guarantee that no one will be writing to the send
366                  * context while it is being re-initialized.
367                  * If user level breaks that guarantee, it will break
368                  * its own context and no one else's.
369                  */
370                 dd = uctxt->dd;
371                 sc = uctxt->sc;
372                 /*
373                  * Wait until the interrupt handler has marked the
374                  * context as halted or frozen. Report error if we time
375                  * out.
376                  */
377                 wait_event_interruptible_timeout(
378                         sc->halt_wait, (sc->flags & SCF_HALTED),
379                         msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
380                 if (!(sc->flags & SCF_HALTED)) {
381                         ret = -ENOLCK;
382                         break;
383                 }
384                 /*
385                  * If the send context was halted due to a Freeze,
386                  * wait until the device has been "unfrozen" before
387                  * resetting the context.
388                  */
389                 if (sc->flags & SCF_FROZEN) {
390                         wait_event_interruptible_timeout(
391                                 dd->event_queue,
392                                 !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
393                                 msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
394                         if (dd->flags & HFI1_FROZEN) {
395                                 ret = -ENOLCK;
396                                 break;
397                         }
398                         if (dd->flags & HFI1_FORCED_FREEZE) {
399                                 /* Don't allow context reset if we are in a
400                                  * forced freeze */
401                                 ret = -ENODEV;
402                                 break;
403                         }
404                         sc_disable(sc);
405                         ret = sc_enable(sc);
406                         hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
407                                      uctxt->ctxt);
408                 } else
409                         ret = sc_restart(sc);
410                 if (!ret)
411                         sc_return_credits(sc);
412                 break;
413         }
414         case HFI1_CMD_EP_INFO:
415         case HFI1_CMD_EP_ERASE_CHIP:
416         case HFI1_CMD_EP_ERASE_P0:
417         case HFI1_CMD_EP_ERASE_P1:
418         case HFI1_CMD_EP_READ_P0:
419         case HFI1_CMD_EP_READ_P1:
420         case HFI1_CMD_EP_WRITE_P0:
421         case HFI1_CMD_EP_WRITE_P1:
422                 ret = handle_eprom_command(&cmd);
423                 break;
424         }
425
426         if (ret >= 0)
427                 ret = consumed;
428 bail:
429         return ret;
430 }
431
432 static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
433 {
434         struct hfi1_user_sdma_pkt_q *pq;
435         struct hfi1_user_sdma_comp_q *cq;
436         int ret = 0, done = 0, reqs = 0;
437         unsigned long dim = from->nr_segs;
438
439         if (!user_sdma_comp_fp(kiocb->ki_filp) ||
440             !user_sdma_pkt_fp(kiocb->ki_filp)) {
441                 ret = -EIO;
442                 goto done;
443         }
444
445         if (!iter_is_iovec(from) || !dim) {
446                 ret = -EINVAL;
447                 goto done;
448         }
449
450         hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
451                   ctxt_fp(kiocb->ki_filp)->ctxt, subctxt_fp(kiocb->ki_filp),
452                   dim);
453         pq = user_sdma_pkt_fp(kiocb->ki_filp);
454         cq = user_sdma_comp_fp(kiocb->ki_filp);
455
456         if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
457                 ret = -ENOSPC;
458                 goto done;
459         }
460
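        /*
         * Each pass through this loop submits one SDMA request; 'count'
         * reports how many iovecs that request consumed.
         */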
461         while (dim) {
462                 unsigned long count = 0;
463
464                 ret = hfi1_user_sdma_process_request(
465                         kiocb->ki_filp, (struct iovec *)(from->iov + done),
466                         dim, &count);
467                 if (ret)
468                         goto done;
469                 dim -= count;
470                 done += count;
471                 reqs++;
472         }
473 done:
474         return ret ? ret : reqs;
475 }
476
477 static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
478 {
479         struct hfi1_ctxtdata *uctxt;
480         struct hfi1_devdata *dd;
481         unsigned long flags, pfn;
482         u64 token = vma->vm_pgoff << PAGE_SHIFT,
483                 memaddr = 0;
484         u8 subctxt, mapio = 0, vmf = 0, type;
485         ssize_t memlen = 0;
486         int ret = 0;
487         u16 ctxt;
488
489         uctxt = ctxt_fp(fp);
490         if (!is_valid_mmap(token) || !uctxt ||
491             !(vma->vm_flags & VM_SHARED)) {
492                 ret = -EINVAL;
493                 goto done;
494         }
495         dd = uctxt->dd;
496         ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
497         subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
498         type = HFI1_MMAP_TOKEN_GET(TYPE, token);
499         if (ctxt != uctxt->ctxt || subctxt != subctxt_fp(fp)) {
500                 ret = -EINVAL;
501                 goto done;
502         }
503
504         flags = vma->vm_flags;
505
506         switch (type) {
507         case PIO_BUFS:
508         case PIO_BUFS_SOP:
509                 memaddr = ((dd->physaddr + TXE_PIO_SEND) +
510                                 /* chip pio base */
511                            (uctxt->sc->hw_context * (1 << 16))) +
512                                 /* 64K PIO space / ctxt */
513                         (type == PIO_BUFS_SOP ?
514                                 (TXE_PIO_SIZE / 2) : 0); /* sop? */
515                 /*
516                  * Map only the amount allocated to the context, not the
517                  * entire available context's PIO space.
518                  */
519                 memlen = ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE,
520                                PAGE_SIZE);
521                 flags &= ~VM_MAYREAD;
522                 flags |= VM_DONTCOPY | VM_DONTEXPAND;
523                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
524                 mapio = 1;
525                 break;
526         case PIO_CRED:
527                 if (flags & VM_WRITE) {
528                         ret = -EPERM;
529                         goto done;
530                 }
531                 /*
532                  * The credit return location for this context could be on the
533                  * second or third page allocated for credit returns (if number
534                  * of enabled contexts > 64 and 128 respectively).
535                  */
536                 memaddr = dd->cr_base[uctxt->numa_id].pa +
537                         (((u64)uctxt->sc->hw_free -
538                           (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
539                 memlen = PAGE_SIZE;
540                 flags &= ~VM_MAYWRITE;
541                 flags |= VM_DONTCOPY | VM_DONTEXPAND;
542                 /*
543                  * The driver has already allocated memory for credit
544                  * returns and programmed it into the chip. Has that
545                  * memory been flagged as non-cached?
546                  */
547                 /* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
548                 mapio = 1;
549                 break;
550         case RCV_HDRQ:
551                 memaddr = uctxt->rcvhdrq_phys;
552                 memlen = uctxt->rcvhdrq_size;
553                 break;
554         case RCV_EGRBUF: {
555                 unsigned long addr;
556                 int i;
557                 /*
558                  * The RcvEgr buffers need to be handled differently
559                  * as multiple non-contiguous pages need to be mapped
560                  * into the user process.
561                  */
562                 memlen = uctxt->egrbufs.size;
563                 if ((vma->vm_end - vma->vm_start) != memlen) {
564                         dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
565                                    (vma->vm_end - vma->vm_start), memlen);
566                         ret = -EINVAL;
567                         goto done;
568                 }
569                 if (vma->vm_flags & VM_WRITE) {
570                         ret = -EPERM;
571                         goto done;
572                 }
573                 vma->vm_flags &= ~VM_MAYWRITE;
574                 addr = vma->vm_start;
575                 for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
576                         ret = remap_pfn_range(
577                                 vma, addr,
578                                 uctxt->egrbufs.buffers[i].phys >> PAGE_SHIFT,
579                                 uctxt->egrbufs.buffers[i].len,
580                                 vma->vm_page_prot);
581                         if (ret < 0)
582                                 goto done;
583                         addr += uctxt->egrbufs.buffers[i].len;
584                 }
585                 ret = 0;
586                 goto done;
587         }
588         case UREGS:
589                 /*
590                  * Map only the page that contains this context's user
591                  * registers.
592                  */
593                 memaddr = (unsigned long)
594                         (dd->physaddr + RXE_PER_CONTEXT_USER)
595                         + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
596                 /*
597                  * TidFlow table is on the same page as the rest of the
598                  * user registers.
599                  */
600                 memlen = PAGE_SIZE;
601                 flags |= VM_DONTCOPY | VM_DONTEXPAND;
602                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
603                 mapio = 1;
604                 break;
605         case EVENTS:
606                 /*
607                  * Use the page where this context's flags are. User level
608                  * knows where its own bitmap is within the page.
609                  */
610                 memaddr = ((unsigned long)dd->events +
611                            ((uctxt->ctxt - dd->first_user_ctxt) *
612                             HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
613                 memlen = PAGE_SIZE;
614                 /*
615                  * v3.7 removes VM_RESERVED but the effect is kept by
616                  * using VM_IO.
617                  */
618                 flags |= VM_IO | VM_DONTEXPAND;
619                 vmf = 1;
620                 break;
621         case STATUS:
622                 memaddr = kvirt_to_phys((void *)dd->status);
623                 memlen = PAGE_SIZE;
624                 flags |= VM_IO | VM_DONTEXPAND;
625                 break;
626         case RTAIL:
627                 if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
628                         /*
629                          * If the memory allocation failed, the context alloc
630                          * also would have failed, so we would never get here
631                          */
632                         ret = -EINVAL;
633                         goto done;
634                 }
635                 if (flags & VM_WRITE) {
636                         ret = -EPERM;
637                         goto done;
638                 }
639                 memaddr = uctxt->rcvhdrqtailaddr_phys;
640                 memlen = PAGE_SIZE;
641                 flags &= ~VM_MAYWRITE;
642                 break;
643         case SUBCTXT_UREGS:
644                 memaddr = (u64)uctxt->subctxt_uregbase;
645                 memlen = PAGE_SIZE;
646                 flags |= VM_IO | VM_DONTEXPAND;
647                 vmf = 1;
648                 break;
649         case SUBCTXT_RCV_HDRQ:
650                 memaddr = (u64)uctxt->subctxt_rcvhdr_base;
651                 memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
652                 flags |= VM_IO | VM_DONTEXPAND;
653                 vmf = 1;
654                 break;
655         case SUBCTXT_EGRBUF:
656                 memaddr = (u64)uctxt->subctxt_rcvegrbuf;
657                 memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
658                 flags |= VM_IO | VM_DONTEXPAND;
659                 flags &= ~VM_MAYWRITE;
660                 vmf = 1;
661                 break;
662         case SDMA_COMP: {
663                 struct hfi1_user_sdma_comp_q *cq;
664
665                 if (!user_sdma_comp_fp(fp)) {
666                         ret = -EFAULT;
667                         goto done;
668                 }
669                 cq = user_sdma_comp_fp(fp);
670                 memaddr = (u64)cq->comps;
671                 memlen = ALIGN(sizeof(*cq->comps) * cq->nentries, PAGE_SIZE);
672                 flags |= VM_IO | VM_DONTEXPAND;
673                 vmf = 1;
674                 break;
675         }
676         default:
677                 ret = -EINVAL;
678                 break;
679         }
680
681         if ((vma->vm_end - vma->vm_start) != memlen) {
682                 hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
683                           uctxt->ctxt, subctxt_fp(fp),
684                           (vma->vm_end - vma->vm_start), memlen);
685                 ret = -EINVAL;
686                 goto done;
687         }
688
689         vma->vm_flags = flags;
690         dd_dev_info(dd,
691                     "%s: %u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
692                     __func__, ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
693                     vma->vm_end - vma->vm_start, vma->vm_flags);
694         pfn = (unsigned long)(memaddr >> PAGE_SHIFT);
695         if (vmf) {
696                 vma->vm_pgoff = pfn;
697                 vma->vm_ops = &vm_ops;
698                 ret = 0;
699         } else if (mapio) {
700                 ret = io_remap_pfn_range(vma, vma->vm_start, pfn, memlen,
701                                          vma->vm_page_prot);
702         } else {
703                 ret = remap_pfn_range(vma, vma->vm_start, pfn, memlen,
704                                       vma->vm_page_prot);
705         }
706 done:
707         return ret;
708 }
709
710 /*
711  * Local (non-chip) user memory is not mapped right away; it is mapped
712  * on demand as the user-level code accesses it.
713  */
714 static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
715 {
716         struct page *page;
717
718         page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
719         if (!page)
720                 return VM_FAULT_SIGBUS;
721
722         get_page(page);
723         vmf->page = page;
724
725         return 0;
726 }
727
728 static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
729 {
730         struct hfi1_ctxtdata *uctxt;
731         unsigned pollflag;
732
733         uctxt = ctxt_fp(fp);
734         if (!uctxt)
735                 pollflag = POLLERR;
736         else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
737                 pollflag = poll_urgent(fp, pt);
738         else  if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
739                 pollflag = poll_next(fp, pt);
740         else /* invalid */
741                 pollflag = POLLERR;
742
743         return pollflag;
744 }
745
746 static int hfi1_file_close(struct inode *inode, struct file *fp)
747 {
748         struct hfi1_filedata *fdata = fp->private_data;
749         struct hfi1_ctxtdata *uctxt = fdata->uctxt;
750         struct hfi1_devdata *dd;
751         unsigned long flags, *ev;
752
753         fp->private_data = NULL;
754
755         if (!uctxt)
756                 goto done;
757
758         hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
759         dd = uctxt->dd;
760         mutex_lock(&hfi1_mutex);
761
762         flush_wc();
763         /* drain user sdma queue */
764         if (fdata->pq)
765                 hfi1_user_sdma_free_queues(fdata);
766
767         /*
768          * Clear any left over, unhandled events so the next process that
769          * gets this context doesn't get confused.
770          */
771         ev = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
772                            HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
773         *ev = 0;
774
775         if (--uctxt->cnt) {
776                 uctxt->active_slaves &= ~(1 << fdata->subctxt);
777                 uctxt->subpid[fdata->subctxt] = 0;
778                 mutex_unlock(&hfi1_mutex);
779                 goto done;
780         }
781
782         spin_lock_irqsave(&dd->uctxt_lock, flags);
783         /*
784          * Disable receive context and interrupt available, reset all
785          * RcvCtxtCtrl bits to default values.
786          */
787         hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
788                      HFI1_RCVCTRL_TIDFLOW_DIS |
789                      HFI1_RCVCTRL_INTRAVAIL_DIS |
790                      HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
791                      HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
792                      HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt);
793         /* Clear the context's J_KEY */
794         hfi1_clear_ctxt_jkey(dd, uctxt->ctxt);
795         /*
796          * Reset context integrity checks to default.
797          * (writes to CSRs probably belong in chip.c)
798          */
799         write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
800                         hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
801         sc_disable(uctxt->sc);
802         uctxt->pid = 0;
803         spin_unlock_irqrestore(&dd->uctxt_lock, flags);
804
805         dd->rcd[uctxt->ctxt] = NULL;
806         uctxt->rcvwait_to = 0;
807         uctxt->piowait_to = 0;
808         uctxt->rcvnowait = 0;
809         uctxt->pionowait = 0;
810         uctxt->event_flags = 0;
811
812         hfi1_clear_tids(uctxt);
813         hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
814
815         if (uctxt->tid_pg_list)
816                 unlock_exp_tids(uctxt);
817
818         hfi1_stats.sps_ctxts--;
819         dd->freectxts++;
820         mutex_unlock(&hfi1_mutex);
821         hfi1_free_ctxtdata(dd, uctxt);
822 done:
823         kfree(fdata);
824         return 0;
825 }
826
827 /*
828  * Convert kernel *virtual* addresses to physical addresses.
829  * This is used for vmalloc'ed addresses.
830  */
831 static u64 kvirt_to_phys(void *addr)
832 {
833         struct page *page;
834         u64 paddr = 0;
835
836         page = vmalloc_to_page(addr);
837         if (page)
838                 paddr = page_to_pfn(page) << PAGE_SHIFT;
839
840         return paddr;
841 }
842
843 static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
844 {
845         int i_minor, ret = 0;
846         unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS;
847
848         swmajor = uinfo->userversion >> 16;
849         if (swmajor != HFI1_USER_SWMAJOR) {
850                 ret = -ENODEV;
851                 goto done;
852         }
853
854         swminor = uinfo->userversion & 0xffff;
855
856         if (uinfo->hfi1_alg < HFI1_ALG_COUNT)
857                 alg = uinfo->hfi1_alg;
858
859         mutex_lock(&hfi1_mutex);
860         /* First, let's check if we need to set up a shared context. */
861         if (uinfo->subctxt_cnt)
862                 ret = find_shared_ctxt(fp, uinfo);
863
864         /*
865          * We execute the following block if we couldn't find a
866          * shared context or if context sharing is not required.
867          */
868         if (!ret) {
869                 i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
870                 ret = get_user_context(fp, uinfo, i_minor - 1, alg);
871         }
872         mutex_unlock(&hfi1_mutex);
873 done:
874         return ret;
875 }
876
877 static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
878                             int devno, unsigned alg)
879 {
880         struct hfi1_devdata *dd = NULL;
881         int ret = 0, devmax, npresent, nup, dev;
882
883         devmax = hfi1_count_units(&npresent, &nup);
884         if (!npresent) {
885                 ret = -ENXIO;
886                 goto done;
887         }
888         if (!nup) {
889                 ret = -ENETDOWN;
890                 goto done;
891         }
892         if (devno >= 0) {
893                 dd = hfi1_lookup(devno);
894                 if (!dd)
895                         ret = -ENODEV;
896                 else if (!dd->freectxts)
897                         ret = -EBUSY;
898         } else {
899                 struct hfi1_devdata *pdd;
900
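                /*
                 * No specific device was requested (devno < 0):
                 * HFI1_ALG_ACROSS picks the device with the most free
                 * contexts (spreading users across devices); any other
                 * algorithm uses the first device with a free context.
                 */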
901                 if (alg == HFI1_ALG_ACROSS) {
902                         unsigned free = 0U;
903
904                         for (dev = 0; dev < devmax; dev++) {
905                                 pdd = hfi1_lookup(dev);
906                                 if (pdd && pdd->freectxts &&
907                                     pdd->freectxts > free) {
908                                         dd = pdd;
909                                         free = pdd->freectxts;
910                                 }
911                         }
912                 } else {
913                         for (dev = 0; dev < devmax; dev++) {
914                                 pdd = hfi1_lookup(dev);
915                                 if (pdd && pdd->freectxts) {
916                                         dd = pdd;
917                                         break;
918                                 }
919                         }
920                 }
921                 if (!dd)
922                         ret = -EBUSY;
923         }
924 done:
925         return ret ? ret : allocate_ctxt(fp, dd, uinfo);
926 }
927
928 static int find_shared_ctxt(struct file *fp,
929                             const struct hfi1_user_info *uinfo)
930 {
931         int devmax, ndev, i;
932         int ret = 0;
933
934         devmax = hfi1_count_units(NULL, NULL);
935
936         for (ndev = 0; ndev < devmax; ndev++) {
937                 struct hfi1_devdata *dd = hfi1_lookup(ndev);
938
939                 /* device portion of usable() */
940                 if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase))
941                         continue;
942                 for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
943                         struct hfi1_ctxtdata *uctxt = dd->rcd[i];
944
945                         /* Skip ctxts which are not yet open */
946                         if (!uctxt || !uctxt->cnt)
947                                 continue;
948                         /* Skip ctxt if it doesn't match the requested one */
949                         if (memcmp(uctxt->uuid, uinfo->uuid,
950                                    sizeof(uctxt->uuid)) ||
951                             uctxt->subctxt_id != uinfo->subctxt_id ||
952                             uctxt->subctxt_cnt != uinfo->subctxt_cnt)
953                                 continue;
954
955                         /* Verify the sharing process matches the master */
956                         if (uctxt->userversion != uinfo->userversion ||
957                             uctxt->cnt >= uctxt->subctxt_cnt) {
958                                 ret = -EINVAL;
959                                 goto done;
960                         }
961                         ctxt_fp(fp) = uctxt;
962                         subctxt_fp(fp) = uctxt->cnt++;
963                         uctxt->subpid[subctxt_fp(fp)] = current->pid;
964                         uctxt->active_slaves |= 1 << subctxt_fp(fp);
965                         ret = 1;
966                         goto done;
967                 }
968         }
969
970 done:
971         return ret;
972 }
973
974 static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
975                          struct hfi1_user_info *uinfo)
976 {
977         struct hfi1_ctxtdata *uctxt;
978         unsigned ctxt;
979         int ret;
980
981         if (dd->flags & HFI1_FROZEN) {
982                 /*
983                  * Pick an error that is unique from all other errors
984                  * that are returned so the user process knows that
985                  * it tried to allocate while the SPC was frozen.  It
986                  * should be able to retry with success in a short
987                  * while.
988                  */
989                 return -EIO;
990         }
991
992         for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts; ctxt++)
993                 if (!dd->rcd[ctxt])
994                         break;
995
996         if (ctxt == dd->num_rcv_contexts)
997                 return -EBUSY;
998
999         uctxt = hfi1_create_ctxtdata(dd->pport, ctxt);
1000         if (!uctxt) {
1001                 dd_dev_err(dd,
1002                            "Unable to allocate ctxtdata memory, failing open\n");
1003                 return -ENOMEM;
1004         }
1005         /*
1006          * Allocate and enable a PIO send context.
1007          */
1008         uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize,
1009                              uctxt->numa_id);
1010         if (!uctxt->sc)
1011                 return -ENOMEM;
1012
1013         dbg("allocated send context %u(%u)\n", uctxt->sc->sw_index,
1014                 uctxt->sc->hw_context);
1015         ret = sc_enable(uctxt->sc);
1016         if (ret)
1017                 return ret;
1018         /*
1019          * Setup shared context resources if the user-level has requested
1020          * shared contexts and this is the 'master' process.
1021          * This has to be done here so the rest of the sub-contexts find the
1022          * proper master.
1023          */
1024         if (uinfo->subctxt_cnt && !subctxt_fp(fp)) {
1025                 ret = init_subctxts(uctxt, uinfo);
1026                 /*
1027                  * On error, we don't need to disable and de-allocate the
1028                  * send context because it will be done during file close
1029                  */
1030                 if (ret)
1031                         return ret;
1032         }
1033         uctxt->userversion = uinfo->userversion;
1034         uctxt->pid = current->pid;
1035         uctxt->flags = HFI1_CAP_UGET(MASK);
1036         init_waitqueue_head(&uctxt->wait);
1037         strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
1038         memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
1039         uctxt->jkey = generate_jkey(current_uid());
1040         INIT_LIST_HEAD(&uctxt->sdma_queues);
1041         spin_lock_init(&uctxt->sdma_qlock);
1042         hfi1_stats.sps_ctxts++;
1043         dd->freectxts--;
1044         ctxt_fp(fp) = uctxt;
1045
1046         return 0;
1047 }
1048
1049 static int init_subctxts(struct hfi1_ctxtdata *uctxt,
1050                          const struct hfi1_user_info *uinfo)
1051 {
1052         int ret = 0;
1053         unsigned num_subctxts;
1054
1055         num_subctxts = uinfo->subctxt_cnt;
1056         if (num_subctxts > HFI1_MAX_SHARED_CTXTS) {
1057                 ret = -EINVAL;
1058                 goto bail;
1059         }
1060
1061         uctxt->subctxt_cnt = uinfo->subctxt_cnt;
1062         uctxt->subctxt_id = uinfo->subctxt_id;
1063         uctxt->active_slaves = 1;
1064         uctxt->redirect_seq_cnt = 1;
1065         set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
1066 bail:
1067         return ret;
1068 }
1069
1070 static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
1071 {
1072         int ret = 0;
1073         unsigned num_subctxts = uctxt->subctxt_cnt;
1074
1075         uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
1076         if (!uctxt->subctxt_uregbase) {
1077                 ret = -ENOMEM;
1078                 goto bail;
1079         }
1080         /* We can take the size of the RcvHdr Queue from the master */
1081         uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
1082                                                   num_subctxts);
1083         if (!uctxt->subctxt_rcvhdr_base) {
1084                 ret = -ENOMEM;
1085                 goto bail_ureg;
1086         }
1087
1088         uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
1089                                                 num_subctxts);
1090         if (!uctxt->subctxt_rcvegrbuf) {
1091                 ret = -ENOMEM;
1092                 goto bail_rhdr;
1093         }
1094         goto bail;
1095 bail_rhdr:
1096         vfree(uctxt->subctxt_rcvhdr_base);
1097 bail_ureg:
1098         vfree(uctxt->subctxt_uregbase);
1099         uctxt->subctxt_uregbase = NULL;
1100 bail:
1101         return ret;
1102 }
1103
1104 static int user_init(struct file *fp)
1105 {
1106         int ret;
1107         unsigned int rcvctrl_ops = 0;
1108         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1109
1110         /* make sure that the context has already been setup */
1111         if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) {
1112                 ret = -EFAULT;
1113                 goto done;
1114         }
1115
1116         /*
1117          * Subctxts don't need to initialize anything since master
1118          * has done it.
1119          */
1120         if (subctxt_fp(fp)) {
1121                 ret = wait_event_interruptible(uctxt->wait,
1122                         !test_bit(HFI1_CTXT_MASTER_UNINIT,
1123                         &uctxt->event_flags));
1124                 goto done;
1125         }
1126
1127         /* initialize poll variables... */
1128         uctxt->urgent = 0;
1129         uctxt->urgent_poll = 0;
1130
1131         /*
1132          * Now enable the ctxt for receive.
1133          * For chips that are set to DMA the tail register to memory
1134          * when it changes (and when the update bit transitions from
1135          * 0 to 1), we turn it off and then back on.
1136          * This will (very briefly) affect any other open ctxts, but the
1137          * duration is very short, and therefore isn't an issue.  We
1138          * explicitly set the in-memory tail copy to 0 beforehand, so we
1139          * don't have to wait to be sure the DMA update has happened
1140          * (chip resets head/tail to 0 on transition to enable).
1141          */
1142         if (uctxt->rcvhdrtail_kvaddr)
1143                 clear_rcvhdrtail(uctxt);
1144
1145         /* Setup J_KEY before enabling the context */
1146         hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);
1147
1148         rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
1149         if (HFI1_CAP_KGET_MASK(uctxt->flags, HDRSUPP))
1150                 rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
1151         /*
1152          * Ignore the bit in the flags for now until proper
1153          * support for multiple packets per rcv array entry is
1154          * added.
1155          */
1156         if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
1157                 rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
1158         if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
1159                 rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
1160         if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
1161                 rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
1162         if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
1163                 rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
1164         hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);
1165
1166         /* Notify any waiting slaves */
1167         if (uctxt->subctxt_cnt) {
1168                 clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
1169                 wake_up(&uctxt->wait);
1170         }
1171         ret = 0;
1172
1173 done:
1174         return ret;
1175 }
1176
1177 static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
1178 {
1179         struct hfi1_ctxt_info cinfo;
1180         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1181         struct hfi1_filedata *fd = fp->private_data;
1182         int ret = 0;
1183
1184         ret = hfi1_get_base_kinfo(uctxt, &cinfo);
1185         if (ret < 0)
1186                 goto done;
1187         cinfo.num_active = hfi1_count_active_units();
1188         cinfo.unit = uctxt->dd->unit;
1189         cinfo.ctxt = uctxt->ctxt;
1190         cinfo.subctxt = subctxt_fp(fp);
1191         cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
1192                                 uctxt->dd->rcv_entries.group_size) +
1193                 uctxt->expected_count;
1194         cinfo.credits = uctxt->sc->credits;
1195         cinfo.numa_node = uctxt->numa_id;
1196         cinfo.rec_cpu = fd->rec_cpu_num;
1197         cinfo.send_ctxt = uctxt->sc->hw_context;
1198
1199         cinfo.egrtids = uctxt->egrbufs.alloced;
1200         cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
1201         cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
1202         cinfo.sdma_ring_size = user_sdma_comp_fp(fp)->nentries;
1203         cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
1204
1205         trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, subctxt_fp(fp), cinfo);
1206         if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
1207                 ret = -EFAULT;
1208 done:
1209         return ret;
1210 }
1211
1212 static int setup_ctxt(struct file *fp)
1213 {
1214         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1215         struct hfi1_devdata *dd = uctxt->dd;
1216         int ret = 0;
1217
1218         /*
1219          * Context should be set up only once (including allocation and
1220          * programming of eager buffers). This is done if context sharing
1221          * is not requested, or by the master process when it is.
1222          */
1223         if (!uctxt->subctxt_cnt || !subctxt_fp(fp)) {
1224                 ret = hfi1_init_ctxt(uctxt->sc);
1225                 if (ret)
1226                         goto done;
1227
1228                 /* Now allocate the RcvHdr queue and eager buffers. */
1229                 ret = hfi1_create_rcvhdrq(dd, uctxt);
1230                 if (ret)
1231                         goto done;
1232                 ret = hfi1_setup_eagerbufs(uctxt);
1233                 if (ret)
1234                         goto done;
1235                 if (uctxt->subctxt_cnt && !subctxt_fp(fp)) {
1236                         ret = setup_subctxt(uctxt);
1237                         if (ret)
1238                                 goto done;
1239                 }
1240                 /* Setup Expected Rcv memories */
1241                 uctxt->tid_pg_list = vzalloc(uctxt->expected_count *
1242                                              sizeof(struct page **));
1243                 if (!uctxt->tid_pg_list) {
1244                         ret = -ENOMEM;
1245                         goto done;
1246                 }
1247                 uctxt->physshadow = vzalloc(uctxt->expected_count *
1248                                             sizeof(*uctxt->physshadow));
1249                 if (!uctxt->physshadow) {
1250                         ret = -ENOMEM;
1251                         goto done;
1252                 }
1253                 /* allocate expected TID map and initialize the cursor */
1254                 atomic_set(&uctxt->tidcursor, 0);
1255                 uctxt->numtidgroups = uctxt->expected_count /
1256                         dd->rcv_entries.group_size;
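                /*
                 * The TID use map holds one bit per TID group, rounded up
                 * to whole unsigned longs.
                 */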
1257                 uctxt->tidmapcnt = uctxt->numtidgroups / BITS_PER_LONG +
1258                         !!(uctxt->numtidgroups % BITS_PER_LONG);
1259                 uctxt->tidusemap = kzalloc_node(uctxt->tidmapcnt *
1260                                                 sizeof(*uctxt->tidusemap),
1261                                                 GFP_KERNEL, uctxt->numa_id);
1262                 if (!uctxt->tidusemap) {
1263                         ret = -ENOMEM;
1264                         goto done;
1265                 }
1266                 /*
1267                  * In case that the number of groups is not a multiple of
1268                  * 64 (the number of groups in a tidusemap element), mark
1269                  * the extra ones as used. This will effectively make them
1270                  * permanently used and should never be assigned. Otherwise,
1271                  * the code which checks how many free groups we have will
1272                  * get completely confused about the state of the bits.
1273                  */
1274                 if (uctxt->numtidgroups % BITS_PER_LONG)
1275                         uctxt->tidusemap[uctxt->tidmapcnt - 1] =
1276                                 ~((1ULL << (uctxt->numtidgroups %
1277                                             BITS_PER_LONG)) - 1);
1278                 trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 0,
1279                                        uctxt->tidusemap, uctxt->tidmapcnt);
1280         }
1281         ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
1282         if (ret)
1283                 goto done;
1284
1285         set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
1286 done:
1287         return ret;
1288 }
1289
1290 static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
1291 {
1292         struct hfi1_base_info binfo;
1293         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1294         struct hfi1_devdata *dd = uctxt->dd;
1295         ssize_t sz;
1296         unsigned offset;
1297         int ret = 0;
1298
1299         trace_hfi1_uctxtdata(uctxt->dd, uctxt);
1300
1301         memset(&binfo, 0, sizeof(binfo));
1302         binfo.hw_version = dd->revision;
1303         binfo.sw_version = HFI1_KERN_SWVERSION;
1304         binfo.bthqp = kdeth_qp;
1305         binfo.jkey = uctxt->jkey;
1306         /*
1307          * If more than 64 contexts are enabled the allocated credit
1308          * return will span two or three contiguous pages. Since we only
1309          * map the page containing the context's credit return address,
1310          * we need to calculate the offset in the proper page.
1311          */
1312         offset = ((u64)uctxt->sc->hw_free -
1313                   (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
1314         binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
1315                                                subctxt_fp(fp), offset);
1316         binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
1317                                             subctxt_fp(fp),
1318                                             uctxt->sc->base_addr);
1319         binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
1320                                                 uctxt->ctxt,
1321                                                 subctxt_fp(fp),
1322                                                 uctxt->sc->base_addr);
1323         binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
1324                                                subctxt_fp(fp),
1325                                                uctxt->rcvhdrq);
1326         binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
1327                                                subctxt_fp(fp),
1328                                                uctxt->egrbufs.rcvtids[0].phys);
1329         binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
1330                                                  subctxt_fp(fp), 0);
1331         /*
1332          * user regs are at
1333          * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
1334          */
1335         binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
1336                                             subctxt_fp(fp), 0);
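        /*
         * Byte offset of this subcontext's event flags word within the
         * shared events page.
         */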
1337         offset = ((((uctxt->ctxt - dd->first_user_ctxt) *
1338                     HFI1_MAX_SHARED_CTXTS) + subctxt_fp(fp)) *
1339                   sizeof(*dd->events)) & ~PAGE_MASK;
1340         binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
1341                                               subctxt_fp(fp),
1342                                               offset);
1343         binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
1344                                               subctxt_fp(fp),
1345                                               dd->status);
1346         if (HFI1_CAP_IS_USET(DMA_RTAIL))
1347                 binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
1348                                                        subctxt_fp(fp), 0);
1349         if (uctxt->subctxt_cnt) {
1350                 binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
1351                                                         uctxt->ctxt,
1352                                                         subctxt_fp(fp), 0);
1353                 binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
1354                                                          uctxt->ctxt,
1355                                                          subctxt_fp(fp), 0);
1356                 binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
1357                                                          uctxt->ctxt,
1358                                                          subctxt_fp(fp), 0);
1359         }
1360         sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
1361         if (copy_to_user(ubase, &binfo, sz))
1362                 ret = -EFAULT;
1363         return ret;
1364 }
1365
1366 static unsigned int poll_urgent(struct file *fp,
1367                                 struct poll_table_struct *pt)
1368 {
1369         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1370         struct hfi1_devdata *dd = uctxt->dd;
1371         unsigned pollflag;
1372
1373         poll_wait(fp, &uctxt->wait, pt);
1374
1375         spin_lock_irq(&dd->uctxt_lock);
1376         if (uctxt->urgent != uctxt->urgent_poll) {
1377                 pollflag = POLLIN | POLLRDNORM;
1378                 uctxt->urgent_poll = uctxt->urgent;
1379         } else {
1380                 pollflag = 0;
1381                 set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
1382         }
1383         spin_unlock_irq(&dd->uctxt_lock);
1384
1385         return pollflag;
1386 }
1387
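/*
 * Poll for the next received packet: if the header queue is empty, request
 * a receive-interrupt notification and report no data, otherwise report the
 * context as readable.
 */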
1388 static unsigned int poll_next(struct file *fp,
1389                               struct poll_table_struct *pt)
1390 {
1391         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1392         struct hfi1_devdata *dd = uctxt->dd;
1393         unsigned pollflag;
1394
1395         poll_wait(fp, &uctxt->wait, pt);
1396
1397         spin_lock_irq(&dd->uctxt_lock);
1398         if (hdrqempty(uctxt)) {
1399                 set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
1400                 hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt->ctxt);
1401                 pollflag = 0;
1402         } else
1403                 pollflag = POLLIN | POLLRDNORM;
1404         spin_unlock_irq(&dd->uctxt_lock);
1405
1406         return pollflag;
1407 }
1408
1409 /*
1410  * Find all user contexts in use, and set the specified bit in their
1411  * event mask.
1412  * See also find_ctxt() for a similar use that is specific to send buffers.
1413  */
1414 int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
1415 {
1416         struct hfi1_ctxtdata *uctxt;
1417         struct hfi1_devdata *dd = ppd->dd;
1418         unsigned ctxt;
1419         int ret = 0;
1420         unsigned long flags;
1421
1422         if (!dd->events) {
1423                 ret = -EINVAL;
1424                 goto done;
1425         }
1426
1427         spin_lock_irqsave(&dd->uctxt_lock, flags);
1428         for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts;
1429              ctxt++) {
1430                 uctxt = dd->rcd[ctxt];
1431                 if (uctxt) {
1432                         unsigned long *evs = dd->events +
1433                                 (uctxt->ctxt - dd->first_user_ctxt) *
1434                                 HFI1_MAX_SHARED_CTXTS;
1435                         int i;
1436                         /*
1437                          * subctxt_cnt is 0 if the context is not shared, so handle
1438                          * the base context first, then any remaining subcontexts
1439                          */
1440                         set_bit(evtbit, evs);
1441                         for (i = 1; i < uctxt->subctxt_cnt; i++)
1442                                 set_bit(evtbit, evs + i);
1443                 }
1444         }
1445         spin_unlock_irqrestore(&dd->uctxt_lock, flags);
1446 done:
1447         return ret;
1448 }
1449
1450 /**
1451  * manage_rcvq - manage a context's receive queue
1452  * @uctxt: the context
1453  * @subctxt: the sub-context
1454  * @start_stop: action to carry out
1455  *
1456  * start_stop == 0 disables receive on the context, for use in queue
1457  * overflow conditions.  start_stop == 1 re-enables receive and is used
1458  * to re-initialize the software copy of the head register
1459  */
1460 static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
1461                        int start_stop)
1462 {
1463         struct hfi1_devdata *dd = uctxt->dd;
1464         unsigned int rcvctrl_op;
1465
1466         if (subctxt)
1467                 goto bail;
1468         /* atomically set or clear receive enable for the ctxt. */
1469         if (start_stop) {
1470                 /*
1471                  * On enable, force in-memory copy of the tail register to
1472                  * 0, so that protocol code doesn't have to worry about
1473                  * whether or not the chip has yet updated the in-memory
1474                  * copy on return from the system call. The chip
1475                  * always resets its tail register back to 0 on a
1476                  * transition from disabled to enabled.
1477                  */
1478                 if (uctxt->rcvhdrtail_kvaddr)
1479                         clear_rcvhdrtail(uctxt);
1480                 rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
1481         } else
1482                 rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
1483         hfi1_rcvctrl(dd, rcvctrl_op, uctxt->ctxt);
1484         /* always; new head should be equal to new tail; see above */
1485 bail:
1486         return 0;
1487 }
1488
1489 /*
1490  * Clear the event notifier events for this context.
1491  * The user process then performs whatever actions are appropriate to the
1492  * bits having been set, if desired, and checks again in the future.
1493  */
1494 static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
1495                           unsigned long events)
1496 {
1497         int i;
1498         struct hfi1_devdata *dd = uctxt->dd;
1499         unsigned long *evs;
1500
1501         if (!dd->events)
1502                 return 0;
1503
1504         evs = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
1505                             HFI1_MAX_SHARED_CTXTS) + subctxt;
1506
1507         for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
1508                 if (!test_bit(i, &events))
1509                         continue;
1510                 clear_bit(i, evs);
1511         }
1512         return 0;
1513 }
1514
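/*
 * Number of pages spanned by the user buffer [vaddr, vaddr + len): one page
 * plus one for every page boundary crossed.  For example, a 16-byte buffer
 * starting 8 bytes before a page boundary spans two pages.
 */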
1515 #define num_user_pages(vaddr, len)                                      \
1516         (1 + (((((unsigned long)(vaddr) +                               \
1517                  (unsigned long)(len) - 1) & PAGE_MASK) -               \
1518                ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))
1519
1520 /**
1521  * tzcnt - count the number of trailing zeros in a 64-bit value
1522  * @value: the value to be examined
1523  *
1524  * Returns the number of trailing least significant zeros in
1525  * the input value. If the value is zero, return the number of
1526  * bits of the value.
1527  */
1528 static inline u8 tzcnt(u64 value)
1529 {
1530         return value ? __builtin_ctzl(value) : sizeof(value) * 8;
1531 }
1532
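/*
 * num_free_groups - count free (zero) group bits in @map at or after *@start.
 * Any set bits at *@start are skipped; *@start is updated to the first free
 * bit found (or BITS_PER_LONG if the word is exhausted) and the return value
 * is the length of the run of free bits ending at the next set bit or the
 * end of the word.
 */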
1533 static inline unsigned num_free_groups(unsigned long map, u16 *start)
1534 {
1535         unsigned free;
1536         u16 bitidx = *start;
1537
1538         if (bitidx >= BITS_PER_LONG)
1539                 return 0;
1540         /* "Turn off" any bits set before our bit index */
1541         map &= ~((1ULL << bitidx) - 1);
1542         free = tzcnt(map) - bitidx;
1543         while (!free && bitidx < BITS_PER_LONG) {
1544                 /* Zero out the last set bit so we look at the rest */
1545                 map &= ~(1ULL << bitidx);
1546                 /*
1547                  * Account for the previously checked bits and advance
1548                  * the bit index. We don't have to check for bitidx
1549                  * getting bigger than BITS_PER_LONG here as it would
1550                  * mean extra instructions that we don't need. If it
1551                  * did happen, it would push free to a negative value
1552                  * which will break the loop.
1553                  */
1554                 free = tzcnt(map) - ++bitidx;
1555         }
1556         *start = bitidx;
1557         return free;
1558 }
1559
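/*
 * exp_tid_setup - program expected receive (TID) entries for a user buffer.
 * Walk the context's TID group bitmaps looking for free groups, pin the
 * corresponding user pages, DMA-map them and program the RcvArray entries,
 * then copy the resulting TID pair list and group map back to user space.
 */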
1560 static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo)
1561 {
1562         int ret = 0;
1563         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1564         struct hfi1_devdata *dd = uctxt->dd;
1565         unsigned tid, mapped = 0, npages, ngroups, exp_groups,
1566                 tidpairs = uctxt->expected_count / 2;
1567         struct page **pages;
1568         unsigned long vaddr, tidmap[uctxt->tidmapcnt];
1569         dma_addr_t *phys;
1570         u32 tidlist[tidpairs], pairidx = 0, tidcursor;
1571         u16 useidx, idx, bitidx, tidcnt = 0;
1572
1573         vaddr = tinfo->vaddr;
1574
1575         if (vaddr & ~PAGE_MASK) {
1576                 ret = -EINVAL;
1577                 goto bail;
1578         }
1579
1580         npages = num_user_pages(vaddr, tinfo->length);
1581         if (!npages) {
1582                 ret = -EINVAL;
1583                 goto bail;
1584         }
1585         if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
1586                        npages * PAGE_SIZE)) {
1587                 dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
1588                            (void *)vaddr, npages);
1589                 ret = -EFAULT;
1590                 goto bail;
1591         }
1592
1593         memset(tidmap, 0, sizeof(tidmap[0]) * uctxt->tidmapcnt);
1594         memset(tidlist, 0, sizeof(tidlist[0]) * tidpairs);
1595
1596         exp_groups = uctxt->expected_count / dd->rcv_entries.group_size;
1597         /* which group set do we look at first? */
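        /*
         * tidcursor packs the bitmap word index in its upper 16 bits and the
         * bit index within that word in its lower 16 bits, so the search
         * resumes where the previous call left off.
         */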
1598         tidcursor = atomic_read(&uctxt->tidcursor);
1599         useidx = (tidcursor >> 16) & 0xffff;
1600         bitidx = tidcursor & 0xffff;
1601
1602         /*
1603          * Keep going until we've mapped all pages or we've exhausted all
1604          * RcvArray entries.
1605          * This iterates over the number of tidmaps + 1
1606          * (idx <= uctxt->tidmapcnt) so that the bitmap we started from is
1607          * checked one more time for any free bits before the starting
1608          * point bit.
1609          */
1610         for (mapped = 0, idx = 0;
1611              mapped < npages && idx <= uctxt->tidmapcnt;) {
1612                 u64 i, offset = 0;
1613                 unsigned free, pinned, pmapped = 0, bits_used;
1614                 u16 grp;
1615
1616                 /*
1617                  * "Reserve" the needed group bits under lock so other
1618                  * processes can't step in the middle of it. Once
1619                  * reserved, we don't need the lock anymore since we
1620                  * are guaranteed the groups.
1621                  */
1622                 spin_lock(&uctxt->exp_lock);
1623                 if (uctxt->tidusemap[useidx] == -1ULL ||
1624                     bitidx >= BITS_PER_LONG) {
1625                         /* no free groups in the set, use the next */
1626                         useidx = (useidx + 1) % uctxt->tidmapcnt;
1627                         idx++;
1628                         bitidx = 0;
1629                         spin_unlock(&uctxt->exp_lock);
1630                         continue;
1631                 }
1632                 ngroups = ((npages - mapped) / dd->rcv_entries.group_size) +
1633                         !!((npages - mapped) % dd->rcv_entries.group_size);
1634
1635                 /*
1636                  * If we've gotten here, the current set of groups does have
1637                  * one or more free groups.
1638                  */
1639                 free = num_free_groups(uctxt->tidusemap[useidx], &bitidx);
1640                 if (!free) {
1641                         /*
1642                          * Despite the check above, free could still come back
1643                          * as 0 because we don't check the entire bitmap but
1644                          * we start from bitidx.
1645                          */
1646                         spin_unlock(&uctxt->exp_lock);
1647                         continue;
1648                 }
1649                 bits_used = min(free, ngroups);
1650                 tidmap[useidx] |= ((1ULL << bits_used) - 1) << bitidx;
1651                 uctxt->tidusemap[useidx] |= tidmap[useidx];
1652                 spin_unlock(&uctxt->exp_lock);
1653
1654                 /*
1655                  * At this point, we know where in the map we have free bits;
1656                  * properly offset into the various "shadow" arrays and compute
1657                  * the RcvArray entry index.
1658                  */
1659                 offset = ((useidx * BITS_PER_LONG) + bitidx) *
1660                         dd->rcv_entries.group_size;
1661                 pages = uctxt->tid_pg_list + offset;
1662                 phys = uctxt->physshadow + offset;
1663                 tid = uctxt->expected_base + offset;
1664
1665                 /* Calculate how many pages we can pin based on free bits */
1666                 pinned = min((bits_used * dd->rcv_entries.group_size),
1667                              (npages - mapped));
1668                 /*
1669                  * Now that we know how many free RcvArray entries we have,
1670                  * we can pin that many user pages.
1671                  */
1672                 ret = hfi1_get_user_pages(vaddr + (mapped * PAGE_SIZE),
1673                                           pinned, pages);
1674                 if (ret) {
1675                         /*
1676                          * We can't continue because the pages array won't be
1677                          * initialized. This should never happen,
1678                          * unless perhaps the user has mpin'ed the pages
1679                          * themselves.
1680                          */
1681                         dd_dev_info(dd,
1682                                     "Failed to lock addr %p, %u pages: errno %d\n",
1683                                     (void *)vaddr, pinned, -ret);
1684                         /*
1685                          * Let go of the bits that we reserved since we are not
1686                          * going to use them.
1687                          */
1688                         spin_lock(&uctxt->exp_lock);
1689                         uctxt->tidusemap[useidx] &=
1690                                 ~(((1ULL << bits_used) - 1) << bitidx);
1691                         spin_unlock(&uctxt->exp_lock);
1692                         goto done;
1693                 }
1694                 /*
1695                  * How many groups do we need based on how many pages we have
1696                  * pinned?
1697                  */
1698                 ngroups = (pinned / dd->rcv_entries.group_size) +
1699                         !!(pinned % dd->rcv_entries.group_size);
1700                 /*
1701                  * Keep programming RcvArray entries for all the <ngroups> free
1702                  * groups.
1703                  */
1704                 for (i = 0, grp = 0; grp < ngroups; i++, grp++) {
1705                         unsigned j;
1706                         u32 pair_size = 0, tidsize;
1707                         /*
1708                          * This inner loop will program an entire group or the
1709                          * array of pinned pages (whichever limit is hit
1710                          * first).
1711                          */
1712                         for (j = 0; j < dd->rcv_entries.group_size &&
1713                                      pmapped < pinned; j++, pmapped++, tid++) {
1714                                 tidsize = PAGE_SIZE;
1715                                 phys[pmapped] = hfi1_map_page(dd->pcidev,
1716                                                    pages[pmapped], 0,
1717                                                    tidsize, PCI_DMA_FROMDEVICE);
1718                                 trace_hfi1_exp_rcv_set(uctxt->ctxt,
1719                                                        subctxt_fp(fp),
1720                                                        tid, vaddr,
1721                                                        phys[pmapped],
1722                                                        pages[pmapped]);
1723                                 /*
1724                                  * page worth of memory. This will handle
1725                                  * the 8K MTU as well as anything smaller
1726                                  * because both entries in the
1727                                  * due to the fact that both entries in the
1728                                  * RcvTidPair are programmed with a page.
1729                                  * PSM currently does not handle anything
1730                                  * bigger than 8K MTU, so should we even worry
1731                                  * about 10K here?
1732                                  */
1733                                 hfi1_put_tid(dd, tid, PT_EXPECTED,
1734                                              phys[pmapped],
1735                                              ilog2(tidsize >> PAGE_SHIFT) + 1);
1736                                 pair_size += tidsize >> PAGE_SHIFT;
1737                                 EXP_TID_RESET(tidlist[pairidx], LEN, pair_size);
1738                                 if (!(tid % 2)) {
1739                                         tidlist[pairidx] |=
1740                                            EXP_TID_SET(IDX,
1741                                                 (tid - uctxt->expected_base)
1742                                                        / 2);
1743                                         tidlist[pairidx] |=
1744                                                 EXP_TID_SET(CTRL, 1);
1745                                         tidcnt++;
1746                                 } else {
1747                                         tidlist[pairidx] |=
1748                                                 EXP_TID_SET(CTRL, 2);
1749                                         pair_size = 0;
1750                                         pairidx++;
1751                                 }
1752                         }
1753                         /*
1754                          * We've programmed the entire group (or as much of the
1755                          * group as we'll use). Now, it's time to push it out...
1756                          */
1757                         flush_wc();
1758                 }
1759                 mapped += pinned;
1760                 atomic_set(&uctxt->tidcursor,
1761                            (((useidx & 0xffffff) << 16) |
1762                             ((bitidx + bits_used) & 0xffffff)));
1763         }
1764         trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 0, uctxt->tidusemap,
1765                                uctxt->tidmapcnt);
1766
1767 done:
1768         /* If we've mapped anything, copy relevant info to user */
1769         if (mapped) {
1770                 if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist,
1771                                  tidlist, sizeof(tidlist[0]) * tidcnt)) {
1772                         ret = -EFAULT;
1773                         goto done;
1774                 }
1775                 /* copy TID info to user */
1776                 if (copy_to_user((void __user *)(unsigned long)tinfo->tidmap,
1777                                  tidmap, sizeof(tidmap[0]) * uctxt->tidmapcnt))
1778                         ret = -EFAULT;
1779         }
1780 bail:
1781         /*
1782          * Calculate mapped length. New Exp TID protocol does not "unwind" and
1783          * report an error if it can't map the entire buffer. It just reports
1784          * the length that was mapped.
1785          */
1786         tinfo->length = mapped * PAGE_SIZE;
1787         tinfo->tidcnt = tidcnt;
1788         return ret;
1789 }
1790
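/*
 * exp_tid_free - release the expected receive (TID) groups named by the
 * user-supplied bitmap: invalidate the RcvArray entries, DMA-unmap and
 * release the pinned pages, and clear the in-use bits.
 */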
1791 static int exp_tid_free(struct file *fp, struct hfi1_tid_info *tinfo)
1792 {
1793         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1794         struct hfi1_devdata *dd = uctxt->dd;
1795         unsigned long tidmap[uctxt->tidmapcnt];
1796         struct page **pages;
1797         dma_addr_t *phys;
1798         u16 idx, bitidx, tid;
1799         int ret = 0;
1800
1801         if (copy_from_user(&tidmap, (void __user *)(unsigned long)
1802                            tinfo->tidmap,
1803                            sizeof(tidmap[0]) * uctxt->tidmapcnt)) {
1804                 ret = -EFAULT;
1805                 goto done;
1806         }
1807         for (idx = 0; idx < uctxt->tidmapcnt; idx++) {
1808                 unsigned long map;
1809
1810                 bitidx = 0;
1811                 if (!tidmap[idx])
1812                         continue;
1813                 map = tidmap[idx];
1814                 while ((bitidx = tzcnt(map)) < BITS_PER_LONG) {
1815                         int i, pcount = 0;
1816                         struct page *pshadow[dd->rcv_entries.group_size];
1817                         unsigned offset = ((idx * BITS_PER_LONG) + bitidx) *
1818                                 dd->rcv_entries.group_size;
1819
1820                         pages = uctxt->tid_pg_list + offset;
1821                         phys = uctxt->physshadow + offset;
1822                         tid = uctxt->expected_base + offset;
1823                         for (i = 0; i < dd->rcv_entries.group_size;
1824                              i++, tid++) {
1825                                 if (pages[i]) {
1826                                         hfi1_put_tid(dd, tid, PT_INVALID,
1827                                                       0, 0);
1828                                         trace_hfi1_exp_rcv_free(uctxt->ctxt,
1829                                                                 subctxt_fp(fp),
1830                                                                 tid, phys[i],
1831                                                                 pages[i]);
1832                                         pci_unmap_page(dd->pcidev, phys[i],
1833                                               PAGE_SIZE, PCI_DMA_FROMDEVICE);
1834                                         pshadow[pcount] = pages[i];
1835                                         pages[i] = NULL;
1836                                         pcount++;
1837                                         phys[i] = 0;
1838                                 }
1839                         }
1840                         flush_wc();
1841                         hfi1_release_user_pages(pshadow, pcount);
1842                         clear_bit(bitidx, &uctxt->tidusemap[idx]);
1843                         map &= ~(1ULL << bitidx);
1844                 }
1845         }
1846         trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 1, uctxt->tidusemap,
1847                                uctxt->tidmapcnt);
1848 done:
1849         return ret;
1850 }
1851
1852 static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt)
1853 {
1854         struct hfi1_devdata *dd = uctxt->dd;
1855         unsigned tid;
1856
1857         dd_dev_info(dd, "ctxt %u unlocking any locked expTID pages\n",
1858                     uctxt->ctxt);
1859         for (tid = 0; tid < uctxt->expected_count; tid++) {
1860                 struct page *p = uctxt->tid_pg_list[tid];
1861                 dma_addr_t phys;
1862
1863                 if (!p)
1864                         continue;
1865
1866                 phys = uctxt->physshadow[tid];
1867                 uctxt->physshadow[tid] = 0;
1868                 uctxt->tid_pg_list[tid] = NULL;
1869                 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
1870                 hfi1_release_user_pages(&p, 1);
1871         }
1872 }
1873
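/*
 * Assign a partition key to the context.  The management pkeys are rejected
 * and the requested pkey must already be present in the port's pkey table.
 */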
1874 static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
1875                          u16 pkey)
1876 {
1877         int ret = -ENOENT, i, intable = 0;
1878         struct hfi1_pportdata *ppd = uctxt->ppd;
1879         struct hfi1_devdata *dd = uctxt->dd;
1880
1881         if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
1882                 ret = -EINVAL;
1883                 goto done;
1884         }
1885
1886         for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
1887                 if (pkey == ppd->pkeys[i]) {
1888                         intable = 1;
1889                         break;
1890                 }
1891
1892         if (intable)
1893                 ret = hfi1_set_ctxt_pkey(dd, uctxt->ctxt, pkey);
1894 done:
1895         return ret;
1896 }
1897
1898 static int ui_open(struct inode *inode, struct file *filp)
1899 {
1900         struct hfi1_devdata *dd;
1901
1902         dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev);
1903         filp->private_data = dd; /* for other methods */
1904         return 0;
1905 }
1906
1907 static int ui_release(struct inode *inode, struct file *filp)
1908 {
1909         /* nothing to do */
1910         return 0;
1911 }
1912
1913 static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
1914 {
1915         struct hfi1_devdata *dd = filp->private_data;
1916
1917         switch (whence) {
1918         case SEEK_SET:
1919                 break;
1920         case SEEK_CUR:
1921                 offset += filp->f_pos;
1922                 break;
1923         case SEEK_END:
1924                 offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
1925                         offset;
1926                 break;
1927         default:
1928                 return -EINVAL;
1929         }
1930
1931         if (offset < 0)
1932                 return -EINVAL;
1933
1934         if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
1935                 return -EINVAL;
1936
1937         filp->f_pos = offset;
1938
1939         return filp->f_pos;
1940 }
1941
1942
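/*
 * The UI device exposes a flat address space: offsets below the register BAR
 * length access the chip CSRs, and the range immediately following maps to
 * the DC8051 data memory.
 */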
1943 /* NOTE: assumes unsigned long is 8 bytes */
1944 static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
1945                         loff_t *f_pos)
1946 {
1947         struct hfi1_devdata *dd = filp->private_data;
1948         void __iomem *base = dd->kregbase;
1949         unsigned long total, csr_off,
1950                 barlen = (dd->kregend - dd->kregbase);
1951         u64 data;
1952
1953         /* only read 8 byte quantities */
1954         if ((count % 8) != 0)
1955                 return -EINVAL;
1956         /* offset must be 8-byte aligned */
1957         if ((*f_pos % 8) != 0)
1958                 return -EINVAL;
1959         /* destination buffer must be 8-byte aligned */
1960         if ((unsigned long)buf % 8 != 0)
1961                 return -EINVAL;
1962         /* must be in range */
1963         if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE))
1964                 return -EINVAL;
1965         /* only set the base if we are not starting past the BAR */
1966         if (*f_pos < barlen)
1967                 base += *f_pos;
1968         csr_off = *f_pos;
1969         for (total = 0; total < count; total += 8, csr_off += 8) {
1970                 /* accessing LCB CSRs requires more checks */
1971                 if (is_lcb_offset(csr_off)) {
1972                         if (read_lcb_csr(dd, csr_off, (u64 *)&data))
1973                                 break; /* failed */
1974                 }
1975                 /*
1976                  * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a
1977                  * false parity error.  Avoid the whole issue by not reading
1978                  * them.  These registers are defined as having a read value
1979                  * of 0.
1980                  */
1981                 else if (csr_off == ASIC_GPIO_CLEAR
1982                                 || csr_off == ASIC_GPIO_FORCE
1983                                 || csr_off == ASIC_QSFP1_CLEAR
1984                                 || csr_off == ASIC_QSFP1_FORCE
1985                                 || csr_off == ASIC_QSFP2_CLEAR
1986                                 || csr_off == ASIC_QSFP2_FORCE)
1987                         data = 0;
1988                 else if (csr_off >= barlen) {
1989                         /*
1990                          * read_8051_data can read more than just 8 bytes at
1991                          * a time. However, folding this into the loop and
1992                          * handling the reads in 8 byte increments allows us
1993                          * to smoothly transition from chip memory to 8051
1994                          * memory.
1995                          */
1996                         if (read_8051_data(dd,
1997                                            (u32)(csr_off - barlen),
1998                                            sizeof(data), &data))
1999                                 break; /* failed */
2000                 } else
2001                         data = readq(base + total);
2002                 if (put_user(data, (unsigned long __user *)(buf + total)))
2003                         break;
2004         }
2005         *f_pos += total;
2006         return total;
2007 }
2008
2009 /* NOTE: assumes unsigned long is 8 bytes */
2010 static ssize_t ui_write(struct file *filp, const char __user *buf,
2011                         size_t count, loff_t *f_pos)
2012 {
2013         struct hfi1_devdata *dd = filp->private_data;
2014         void __iomem *base;
2015         unsigned long total, data, csr_off;
2016         int in_lcb;
2017
2018         /* only write 8 byte quantities */
2019         if ((count % 8) != 0)
2020                 return -EINVAL;
2021         /* offset must be 8-byte aligned */
2022         if ((*f_pos % 8) != 0)
2023                 return -EINVAL;
2024         /* source buffer must be 8-byte aligned */
2025         if ((unsigned long)buf % 8 != 0)
2026                 return -EINVAL;
2027         /* must be in range */
2028         if (*f_pos + count > dd->kregend - dd->kregbase)
2029                 return -EINVAL;
2030
2031         base = (void __iomem *)dd->kregbase + *f_pos;
2032         csr_off = *f_pos;
2033         in_lcb = 0;
2034         for (total = 0; total < count; total += 8, csr_off += 8) {
2035                 if (get_user(data, (unsigned long __user *)(buf + total)))
2036                         break;
2037                 /* accessing LCB CSRs requires a special procedure */
2038                 if (is_lcb_offset(csr_off)) {
2039                         if (!in_lcb) {
2040                                 int ret = acquire_lcb_access(dd, 1);
2041
2042                                 if (ret)
2043                                         break;
2044                                 in_lcb = 1;
2045                         }
2046                 } else {
2047                         if (in_lcb) {
2048                                 release_lcb_access(dd, 1);
2049                                 in_lcb = 0;
2050                         }
2051                 }
2052                 writeq(data, base + total);
2053         }
2054         if (in_lcb)
2055                 release_lcb_access(dd, 1);
2056         *f_pos += total;
2057         return total;
2058 }
2059
2060 static const struct file_operations ui_file_ops = {
2061         .owner = THIS_MODULE,
2062         .llseek = ui_lseek,
2063         .read = ui_read,
2064         .write = ui_write,
2065         .open = ui_open,
2066         .release = ui_release,
2067 };
2068 #define UI_OFFSET 192   /* device minor offset for UI devices */
2069 static int create_ui = 1;
2070
2071 static struct cdev wildcard_cdev;
2072 static struct device *wildcard_device;
2073
2074 static atomic_t user_count = ATOMIC_INIT(0);
2075
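/*
 * The first unit added also creates the wildcard cdev/device shared by all
 * units; the last unit removed tears it down again.  user_count tracks how
 * many units are registered.
 */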
2076 static void user_remove(struct hfi1_devdata *dd)
2077 {
2078         if (atomic_dec_return(&user_count) == 0)
2079                 hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device);
2080
2081         hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
2082         hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device);
2083 }
2084
2085 static int user_add(struct hfi1_devdata *dd)
2086 {
2087         char name[10];
2088         int ret;
2089
2090         if (atomic_inc_return(&user_count) == 1) {
2091                 ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
2092                                      &wildcard_cdev, &wildcard_device);
2093                 if (ret)
2094                         goto done;
2095         }
2096
2097         snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
2098         ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops,
2099                              &dd->user_cdev, &dd->user_device);
2100         if (ret)
2101                 goto done;
2102
2103         if (create_ui) {
2104                 snprintf(name, sizeof(name),
2105                          "%s_ui%d", class_name(), dd->unit);
2106                 ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
2107                                      &dd->ui_cdev, &dd->ui_device);
2108                 if (ret)
2109                         goto done;
2110         }
2111
2112         return 0;
2113 done:
2114         user_remove(dd);
2115         return ret;
2116 }
2117
2118 /*
2119  * Create per-unit files in /dev
2120  */
2121 int hfi1_device_create(struct hfi1_devdata *dd)
2122 {
2123         int r, ret;
2124
2125         r = user_add(dd);
2126         ret = hfi1_diag_add(dd);
2127         if (r && !ret)
2128                 ret = r;
2129         return ret;
2130 }
2131
2132 /*
2133  * Remove per-unit files in /dev
2134  * This is void because the core kernel returns no errors for these removals
2135  */
2136 void hfi1_device_remove(struct hfi1_devdata *dd)
2137 {
2138         user_remove(dd);
2139         hfi1_diag_remove(dd);
2140 }