/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
#include <asm/compat.h>
#include <asm/clp.h>
#include <uapi/asm/clp.h>

static inline void zpci_err_clp(unsigned int rsp, int rc)
{
	struct {
		unsigned int rsp;
		int rc;
	} __packed data = {rsp, rc};

	zpci_err_hex(&data, sizeof(data));
}

/*
 * Call Logical Processor with c=1, lps=0 and command 1
 * to get the bit mask of installed logical processors.
 */
static inline int clp_get_ilp(unsigned long *ilp)
{
	unsigned long mask;
	int cc = 3;

	asm volatile (
		"	.insn	rrf,0xb9a00000,%[mask],%[cmd],8,0\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [mask] "=d" (mask) : [cmd] "a" (1)
		: "cc");
	*ilp = mask;
	return cc;
}

/*
 * Call Logical Processor with c=0, the given constant lps and an lpcb request.
 */
static inline int clp_req(void *data, unsigned int lps)
{
	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
	u64 ignored;
	int cc = 3;

	asm volatile (
		"	.insn	rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [ign] "=d" (ignored), "+m" (*req)
		: [req] "a" (req), [lps] "i" (lps)
		: "cc");
	return cc;
}

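/* CLP request/response blocks are CLP_BLK_SIZE bytes, backed by whole pages. */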
static void *clp_alloc_block(gfp_t gfp_mask)
{
	return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
}

static void clp_free_block(void *ptr)
{
	free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}

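/* Cache the function group attributes returned by a group query in zdev. */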
static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
				      struct clp_rsp_query_pci_grp *response)
{
	zdev->tlb_refresh = response->refresh;
	zdev->dma_mask = response->dasm;
	zdev->msi_addr = response->msia;
	zdev->max_msi = response->noi;
	zdev->fmb_update = response->mui;

	switch (response->version) {
	case 1:
		zdev->max_bus_speed = PCIE_SPEED_5_0GT;
		break;
	default:
		zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
		break;
	}
}

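/* Issue a Query PCI Function Group CLP request for the given group id. */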
static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
	struct clp_req_rsp_query_pci_grp *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.pfgid = pfgid;

	rc = clp_req(rrb, CLP_LPS_PCI);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
		clp_store_query_pci_fngrp(zdev, &rrb->response);
	else {
		zpci_err("Q PCI FGRP:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}

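/* Copy the queried attributes of a single PCI function into zdev. */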
static int clp_store_query_pci_fn(struct zpci_dev *zdev,
				  struct clp_rsp_query_pci *response)
{
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		zdev->bars[i].val = le32_to_cpu(response->bar[i]);
		zdev->bars[i].size = response->bar_size[i];
	}
	zdev->start_dma = response->sdma;
	zdev->end_dma = response->edma;
	zdev->pchid = response->pchid;
	zdev->pfgid = response->pfgid;
	zdev->pft = response->pft;
	zdev->vfn = response->vfn;
	zdev->uid = response->uid;

	memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
	if (response->util_str_avail) {
		memcpy(zdev->util_str, response->util_str,
		       sizeof(zdev->util_str));
	}

	return 0;
}

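/* Query a PCI function and, if it belongs to a group, that group as well. */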
static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
{
	struct clp_req_rsp_query_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.fh = fh;

	rc = clp_req(rrb, CLP_LPS_PCI);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		rc = clp_store_query_pci_fn(zdev, &rrb->response);
		if (rc)
			goto out;
		if (rrb->response.pfgid)
			rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
	} else {
		zpci_err("Q PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
out:
	clp_free_block(rrb);
	return rc;
}

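/*
 * Allocate a zpci_dev for a newly discovered function, query its
 * attributes and register it with the zPCI core.
 */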
int clp_add_pci_device(u32 fid, u32 fh, int configured)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return -ENOMEM;

	zdev->fh = fh;
	zdev->fid = fid;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev, fh);
	if (rc)
		goto error;

	if (configured)
		zdev->state = ZPCI_FN_STATE_CONFIGURED;
	else
		zdev->state = ZPCI_FN_STATE_STANDBY;

	rc = zpci_create_device(zdev);
	if (rc)
		goto error;
	return 0;

error:
	kfree(zdev);
	return rc;
}

/*
 * Enable/Disable a given PCI function defined by its function handle.
 */
static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
{
	struct clp_req_rsp_set_pci *rrb;
	int rc, retries = 100;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_SET_PCI_FN;
		rrb->response.hdr.len = sizeof(rrb->response);
		rrb->request.fh = *fh;
		rrb->request.oc = command;
		rrb->request.ndas = nr_dma_as;

		rc = clp_req(rrb, CLP_LPS_PCI);
		if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
			retries--;
			if (retries < 0)
				break;
			msleep(20);
		}
	} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
		*fh = rrb->response.fh;
	else {
		zpci_err("Set PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}

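/*
 * Enable a function and request nr_dma_as DMA address spaces for it;
 * on success the firmware returns a new (enabled) function handle.
 */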
int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
{
	u32 fh = zdev->fh;
	int rc;

	rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
	if (!rc)
		/* Success -> store enabled handle in zdev */
		zdev->fh = fh;

	zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	return rc;
}

int clp_disable_fh(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc;

	if (!zdev_enabled(zdev))
		return 0;

	rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
	if (!rc)
		/* Success -> store disabled handle in zdev */
		zdev->fh = fh;

	zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	return rc;
}

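/*
 * Fetch the PCI function list from the firmware in CLP_BLK_SIZE chunks,
 * invoking cb on every entry; the resume token continues the listing
 * where the previous response left off.
 */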
static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
			void (*cb)(struct clp_fh_list_entry *entry))
{
	u64 resume_token = 0;
	int entries, i, rc;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_LIST_PCI;
		/* store as many entries as possible */
		rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
		rrb->request.resume_token = resume_token;

		/* Get PCI function handle list */
		rc = clp_req(rrb, CLP_LPS_PCI);
		if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
			zpci_err("List PCI FN:\n");
			zpci_err_clp(rrb->response.hdr.rsp, rc);
			rc = -EIO;
			goto out;
		}

		WARN_ON_ONCE(rrb->response.entry_size !=
			sizeof(struct clp_fh_list_entry));

		entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
			rrb->response.entry_size;

		resume_token = rrb->response.resume_token;
		for (i = 0; i < entries; i++)
			cb(&rrb->response.fh_list[i]);
	} while (resume_token);
out:
	return rc;
}

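/* List callback: register every populated function (vendor id != 0). */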
static void __clp_add(struct clp_fh_list_entry *entry)
{
	if (!entry->vendor_id)
		return;

	clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
}

static void __clp_rescan(struct clp_fh_list_entry *entry)
{
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
		return;

	zdev = get_zdev_by_fid(entry->fid);
	if (!zdev) {
		clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
		return;
	}

	if (!entry->config_state) {
		/*
		 * The handle is already disabled, which means the iota/irq
		 * resources can no longer be freed via the firmware
		 * interfaces. Need to free resources manually
		 * (DMA memory, debug, sysfs)...
		 */
		zpci_stop_device(zdev);
	}
}

static void __clp_update(struct clp_fh_list_entry *entry)
{
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
		return;

	zdev = get_zdev_by_fid(entry->fid);
	if (!zdev)
		return;

	zdev->fh = entry->fh;
}

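/* Initial bus scan: list all PCI functions and register each of them. */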
int clp_scan_pci_devices(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_add);

	clp_free_block(rrb);
	return rc;
}

int clp_rescan_pci_devices(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_rescan);

	clp_free_block(rrb);
	return rc;
}

int clp_rescan_pci_devices_simple(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_NOWAIT);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_update);

	clp_free_block(rrb);
	return rc;
}

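/*
 * What follows implements the clp misc device: CLP request blocks passed
 * in from user space are validated and then forwarded to the firmware.
 */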
static int clp_base_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_BASE) ? -EOPNOTSUPP : 0;
}

static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
	switch (lpcb->cmd) {
	case 0x0001: /* store logical-processor characteristics */
		return clp_base_slpc(req, (void *) lpcb);
	default:
		return -EINVAL;
	}
}

static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_list(struct clp_req *req, struct clp_req_rsp_list_pci *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_query(struct clp_req *req,
			 struct clp_req_rsp_query_pci *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_query_grp(struct clp_req *req,
			     struct clp_req_rsp_query_pci_grp *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0 ||
	    lpcb->request.reserved4 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
	switch (lpcb->cmd) {
	case 0x0001: /* store logical-processor characteristics */
		return clp_pci_slpc(req, (void *) lpcb);
	case 0x0002: /* list PCI functions */
		return clp_pci_list(req, (void *) lpcb);
	case 0x0003: /* query PCI function */
		return clp_pci_query(req, (void *) lpcb);
	case 0x0004: /* query PCI function group */
		return clp_pci_query_grp(req, (void *) lpcb);
	default:
		return -EINVAL;
	}
}

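/*
 * Copy a full CLP block from user space, dispatch it based on req->lps
 * (0 = base, 2 = PCI) and copy the response back on success.
 */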
static int clp_normal_command(struct clp_req *req)
{
	struct clp_req_hdr *lpcb;
	void __user *uptr;
	int rc;

	rc = -EINVAL;
	if (req->lps != 0 && req->lps != 2)
		goto out;

	rc = -ENOMEM;
	lpcb = clp_alloc_block(GFP_KERNEL);
	if (!lpcb)
		goto out;

	rc = -EFAULT;
	uptr = (void __force __user *)(unsigned long) req->data_p;
	if (copy_from_user(lpcb, uptr, PAGE_SIZE) != 0)
		goto out_free;

	rc = -EINVAL;
	if (lpcb->fmt != 0 || lpcb->reserved1 != 0 || lpcb->reserved2 != 0)
		goto out_free;

	switch (req->lps) {
	case 0:
		rc = clp_base_command(req, lpcb);
		break;
	case 2:
		rc = clp_pci_command(req, lpcb);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (rc)
		goto out_free;

	rc = -EFAULT;
	if (copy_to_user(uptr, lpcb, PAGE_SIZE) != 0)
		goto out_free;

	rc = 0;

out_free:
	clp_free_block(lpcb);
out:
	return rc;
}

static int clp_immediate_command(struct clp_req *req)
{
	void __user *uptr;
	unsigned long ilp;
	int exists;

	if (req->cmd > 1 || clp_get_ilp(&ilp) != 0)
		return -EINVAL;

	uptr = (void __force __user *)(unsigned long) req->data_p;
	if (req->cmd == 0) {
		/* Command code 0: test for a specific processor */
		exists = test_bit_inv(req->lps, &ilp);
		return put_user(exists, (int __user *) uptr);
	}
	/* Command code 1: return bit mask of installed processors */
	return put_user(ilp, (unsigned long __user *) uptr);
}

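/* ioctl entry point: fetch the clp_req descriptor and run the command. */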
static long clp_misc_ioctl(struct file *filp, unsigned int cmd,
			   unsigned long arg)
{
	struct clp_req req;
	void __user *argp;

	if (cmd != CLP_SYNC)
		return -EINVAL;

	argp = is_compat_task() ? compat_ptr(arg) : (void __user *) arg;
	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;
	if (req.r != 0)
		return -EINVAL;
	return req.c ? clp_immediate_command(&req) : clp_normal_command(&req);
}

static int clp_misc_release(struct inode *inode, struct file *filp)
{
	return 0;
}

static const struct file_operations clp_misc_fops = {
	.owner = THIS_MODULE,
	.open = nonseekable_open,
	.release = clp_misc_release,
	.unlocked_ioctl = clp_misc_ioctl,
	.compat_ioctl = clp_misc_ioctl,
	.llseek = no_llseek,
};

static struct miscdevice clp_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "clp",
	.fops = &clp_misc_fops,
};

static int __init clp_misc_init(void)
{
	return misc_register(&clp_misc_device);
}

device_initcall(clp_misc_init);