drivers/ntb/ntb_hw.c
ntb: consolidate reading of PPD to move platform detection earlier
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  *   redistributing this file, you may do so under either license.
4  *
5  *   GPL LICENSE SUMMARY
6  *
7  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
8  *
9  *   This program is free software; you can redistribute it and/or modify
10  *   it under the terms of version 2 of the GNU General Public License as
11  *   published by the Free Software Foundation.
12  *
13  *   BSD LICENSE
14  *
15  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
16  *
17  *   Redistribution and use in source and binary forms, with or without
18  *   modification, are permitted provided that the following conditions
19  *   are met:
20  *
21  *     * Redistributions of source code must retain the above copyright
22  *       notice, this list of conditions and the following disclaimer.
23  *     * Redistributions in binary form must reproduce the above copyright
24  *       notice, this list of conditions and the following disclaimer in
25  *       the documentation and/or other materials provided with the
26  *       distribution.
27  *     * Neither the name of Intel Corporation nor the names of its
28  *       contributors may be used to endorse or promote products derived
29  *       from this software without specific prior written permission.
30  *
31  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
34  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
37  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
39  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
41  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42  *
43  * Intel PCIe NTB Linux driver
44  *
45  * Contact Information:
46  * Jon Mason <jon.mason@intel.com>
47  */
48 #include <linux/debugfs.h>
49 #include <linux/delay.h>
50 #include <linux/init.h>
51 #include <linux/interrupt.h>
52 #include <linux/module.h>
53 #include <linux/pci.h>
54 #include <linux/random.h>
55 #include <linux/slab.h>
56 #include "ntb_hw.h"
57 #include "ntb_regs.h"
58
59 #define NTB_NAME        "Intel(R) PCI-E Non-Transparent Bridge Driver"
60 #define NTB_VER         "1.0"
61
62 MODULE_DESCRIPTION(NTB_NAME);
63 MODULE_VERSION(NTB_VER);
64 MODULE_LICENSE("Dual BSD/GPL");
65 MODULE_AUTHOR("Intel Corporation");
66
67 static bool xeon_errata_workaround = true;
68 module_param(xeon_errata_workaround, bool, 0644);
69 MODULE_PARM_DESC(xeon_errata_workaround, "Workaround for the Xeon Errata");
70
71 enum {
72         NTB_CONN_TRANSPARENT = 0,
73         NTB_CONN_B2B,
74         NTB_CONN_RP,
75 };
76
77 enum {
78         NTB_DEV_USD = 0,
79         NTB_DEV_DSD,
80 };
81
82 enum {
83         SNB_HW = 0,
84         BWD_HW,
85 };
86
87 static struct dentry *debugfs_dir;
88
89 #define BWD_LINK_RECOVERY_TIME  500
90
91 /* Translate memory window 0,1 to BAR 2,4 */
92 #define MW_TO_BAR(mw)   ((mw) * NTB_MAX_NUM_MW + 2)
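/* Worked example of the macro (assuming NTB_MAX_NUM_MW == 2, as the comment
 * above implies): MW_TO_BAR(0) == 2 and MW_TO_BAR(1) == 4.
 */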
93
94 static const struct pci_device_id ntb_pci_tbl[] = {
95         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
96         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
97         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
98         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
99         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
100         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
101         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
102         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
103         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
104         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
105         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
106         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
107         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
108         {0}
109 };
110 MODULE_DEVICE_TABLE(pci, ntb_pci_tbl);
111
112 static int is_ntb_xeon(struct ntb_device *ndev)
113 {
114         switch (ndev->pdev->device) {
115         case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
116         case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
117         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
118         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
119         case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
120         case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
121         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
122         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
123         case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
124         case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
125         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
126         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
127                 return 1;
128         default:
129                 return 0;
130         }
131
132         return 0;
133 }
134
135 static int is_ntb_atom(struct ntb_device *ndev)
136 {
137         switch (ndev->pdev->device) {
138         case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
139                 return 1;
140         default:
141                 return 0;
142         }
143
144         return 0;
145 }
146
147 /**
148  * ntb_register_event_callback() - register event callback
149  * @ndev: pointer to ntb_device instance
150  * @func: callback function to register
151  *
152  * This function registers a callback for any HW driver events such as link
153  * up/down, power management notices, etc.
154  *
155  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
156  */
157 int ntb_register_event_callback(struct ntb_device *ndev,
158                                 void (*func)(void *handle,
159                                              enum ntb_hw_event event))
160 {
161         if (ndev->event_cb)
162                 return -EINVAL;
163
164         ndev->event_cb = func;
165
166         return 0;
167 }
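/*
 * Illustrative sketch only (not part of this driver): a transport could
 * register an event handler along these lines.  "struct my_transport",
 * my_event_cb() and the work items are hypothetical names; the handle passed
 * back to the callback is whatever was given to ntb_register_transport().
 *
 *	static void my_event_cb(void *handle, enum ntb_hw_event event)
 *	{
 *		struct my_transport *nt = handle;
 *
 *		if (event == NTB_EVENT_HW_LINK_UP)
 *			schedule_delayed_work(&nt->link_work, 0);
 *		else if (event == NTB_EVENT_HW_LINK_DOWN)
 *			schedule_delayed_work(&nt->link_cleanup, 0);
 *	}
 *
 *	rc = ntb_register_event_callback(ndev, my_event_cb);
 */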
168
169 /**
170  * ntb_unregister_event_callback() - unregisters the event callback
171  * @ndev: pointer to ntb_device instance
172  *
173  * This function unregisters the existing callback from transport
174  */
175 void ntb_unregister_event_callback(struct ntb_device *ndev)
176 {
177         ndev->event_cb = NULL;
178 }
179
180 static void ntb_irq_work(unsigned long data)
181 {
182         struct ntb_db_cb *db_cb = (struct ntb_db_cb *)data;
183         int rc;
184
185         rc = db_cb->callback(db_cb->data, db_cb->db_num);
186         if (rc)
187                 tasklet_schedule(&db_cb->irq_work);
188         else {
189                 struct ntb_device *ndev = db_cb->ndev;
190                 unsigned long mask;
191
192                 mask = readw(ndev->reg_ofs.ldb_mask);
193                 clear_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
194                 writew(mask, ndev->reg_ofs.ldb_mask);
195         }
196 }
197
198 /**
199  * ntb_register_db_callback() - register a callback for doorbell interrupt
200  * @ndev: pointer to ntb_device instance
201  * @idx: doorbell index to register callback, zero based
202  * @data: pointer to be returned to caller with every callback
203  * @func: callback function to register
204  *
205  * This function registers a callback function for the doorbell interrupt
206  * on the primary side. The function will unmask the doorbell as well to
207  * allow interrupt.
208  *
209  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
210  */
211 int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
212                              void *data, int (*func)(void *data, int db_num))
213 {
214         unsigned long mask;
215
216         if (idx >= ndev->max_cbs || ndev->db_cb[idx].callback) {
217                 dev_warn(&ndev->pdev->dev, "Invalid Index.\n");
218                 return -EINVAL;
219         }
220
221         ndev->db_cb[idx].callback = func;
222         ndev->db_cb[idx].data = data;
223         ndev->db_cb[idx].ndev = ndev;
224
225         tasklet_init(&ndev->db_cb[idx].irq_work, ntb_irq_work,
226                      (unsigned long) &ndev->db_cb[idx]);
227
228         /* unmask interrupt */
229         mask = readw(ndev->reg_ofs.ldb_mask);
230         clear_bit(idx * ndev->bits_per_vector, &mask);
231         writew(mask, ndev->reg_ofs.ldb_mask);
232
233         return 0;
234 }
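/*
 * Illustrative sketch only: a doorbell handler and its registration, with
 * hypothetical my_db_cb()/struct my_qp names.  The return value matters:
 * returning non-zero makes ntb_irq_work() reschedule the tasklet instead of
 * unmasking the doorbell, so return 0 once the doorbell has been serviced.
 *
 *	static int my_db_cb(void *data, int db_num)
 *	{
 *		struct my_qp *qp = data;
 *
 *		if (!my_process_rx(qp))		(hypothetical helper)
 *			return 1;		(more work pending, run again)
 *
 *		return 0;			(done, doorbell is unmasked)
 *	}
 *
 *	rc = ntb_register_db_callback(ndev, qp_num, qp, my_db_cb);
 */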
235
236 /**
237  * ntb_unregister_db_callback() - unregister a callback for doorbell interrupt
238  * @ndev: pointer to ntb_device instance
239  * @idx: doorbell index to register callback, zero based
240  *
241  * This function unregisters a callback function for the doorbell interrupt
242  * on the primary side. The function will also mask that doorbell.
243  */
244 void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx)
245 {
246         unsigned long mask;
247
248         if (idx >= ndev->max_cbs || !ndev->db_cb[idx].callback)
249                 return;
250
251         mask = readw(ndev->reg_ofs.ldb_mask);
252         set_bit(idx * ndev->bits_per_vector, &mask);
253         writew(mask, ndev->reg_ofs.ldb_mask);
254
255         tasklet_disable(&ndev->db_cb[idx].irq_work);
256
257         ndev->db_cb[idx].callback = NULL;
258 }
259
260 /**
261  * ntb_find_transport() - find the transport pointer
262  * @pdev: pointer to pci device
263  *
264  * Given the pci device pointer, return the transport pointer that was passed
265  * in when the transport was attached during initialization.
266  *
267  * RETURNS: pointer to transport.
268  */
269 void *ntb_find_transport(struct pci_dev *pdev)
270 {
271         struct ntb_device *ndev = pci_get_drvdata(pdev);
272         return ndev->ntb_transport;
273 }
274
275 /**
276  * ntb_register_transport() - Register NTB transport with NTB HW driver
277  * @transport: transport identifier
278  *
279  * This function allows a transport to reserve the hardware driver for
280  * NTB usage.
281  *
282  * RETURNS: pointer to ntb_device, NULL on error.
283  */
284 struct ntb_device *ntb_register_transport(struct pci_dev *pdev, void *transport)
285 {
286         struct ntb_device *ndev = pci_get_drvdata(pdev);
287
288         if (ndev->ntb_transport)
289                 return NULL;
290
291         ndev->ntb_transport = transport;
292         return ndev;
293 }
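/*
 * Illustrative probe-time sequence for a transport (error handling and names
 * are hypothetical); ntb_register_transport() returns NULL if another
 * transport has already claimed the device.
 *
 *	ndev = ntb_register_transport(pdev, nt);
 *	if (!ndev)
 *		return -EBUSY;
 *	rc = ntb_register_event_callback(ndev, my_event_cb);
 *	rc = ntb_register_db_callback(ndev, 0, nt, my_db_cb);
 */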
294
295 /**
296  * ntb_unregister_transport() - Unregister the transport with the NTB HW driver
297  * @ndev: ntb_device of the transport to be freed
298  *
299  * This function unregisters the transport from the HW driver and performs any
300  * necessary cleanups.
301  */
302 void ntb_unregister_transport(struct ntb_device *ndev)
303 {
304         int i;
305
306         if (!ndev->ntb_transport)
307                 return;
308
309         for (i = 0; i < ndev->max_cbs; i++)
310                 ntb_unregister_db_callback(ndev, i);
311
312         ntb_unregister_event_callback(ndev);
313         ndev->ntb_transport = NULL;
314 }
315
316 /**
317  * ntb_write_local_spad() - write to the local scratchpad register
318  * @ndev: pointer to ntb_device instance
319  * @idx: index to the scratchpad register, 0 based
320  * @val: the data value to put into the register
321  *
322  * This function allows writing of a 32bit value to the indexed scratchpad
323  * register. This writes over the data mirrored to the local scratchpad register
324  * by the remote system.
325  *
326  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
327  */
328 int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val)
329 {
330         if (idx >= ndev->limits.max_spads)
331                 return -EINVAL;
332
333         dev_dbg(&ndev->pdev->dev, "Writing %x to local scratch pad index %d\n",
334                 val, idx);
335         writel(val, ndev->reg_ofs.spad_read + idx * 4);
336
337         return 0;
338 }
339
340 /**
341  * ntb_read_local_spad() - read from the primary scratchpad register
342  * @ndev: pointer to ntb_device instance
343  * @idx: index to scratchpad register, 0 based
344  * @val: pointer to 32bit integer for storing the register value
345  *
346  * This function allows reading of the 32bit scratchpad register on
347  * the primary (internal) side.  This allows the local system to read data
348  * written and mirrored to the scratchpad register by the remote system.
349  *
350  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
351  */
352 int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
353 {
354         if (idx >= ndev->limits.max_spads)
355                 return -EINVAL;
356
357         *val = readl(ndev->reg_ofs.spad_write + idx * 4);
358         dev_dbg(&ndev->pdev->dev,
359                 "Reading %x from local scratch pad index %d\n", *val, idx);
360
361         return 0;
362 }
363
364 /**
365  * ntb_write_remote_spad() - write to the secondary scratchpad register
366  * @ndev: pointer to ntb_device instance
367  * @idx: index to the scratchpad register, 0 based
368  * @val: the data value to put into the register
369  *
370  * This function allows writing of a 32bit value to the indexed scratchpad
371  * register. The register resides on the secondary (external) side.  This allows
372  * the local system to write data to be mirrored to the remote systems
373  * scratchpad register.
374  *
375  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
376  */
377 int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val)
378 {
379         if (idx >= ndev->limits.max_spads)
380                 return -EINVAL;
381
382         dev_dbg(&ndev->pdev->dev, "Writing %x to remote scratch pad index %d\n",
383                 val, idx);
384         writel(val, ndev->reg_ofs.spad_write + idx * 4);
385
386         return 0;
387 }
388
389 /**
390  * ntb_read_remote_spad() - read from the primary scratchpad register
391  * @ndev: pointer to ntb_device instance
392  * @idx: index to scratchpad register, 0 based
393  * @val: pointer to 32bit integer for storing the register value
394  *
395  * This function allows reading of the 32bit scratchpad register on
396  * the primary (internal) side.  This allows the local system to read the data
397  * it wrote to be mirrored on the remote system.
398  *
399  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
400  */
401 int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
402 {
403         if (idx >= ndev->limits.max_spads)
404                 return -EINVAL;
405
406         *val = readl(ndev->reg_ofs.spad_read + idx * 4);
407         dev_dbg(&ndev->pdev->dev,
408                 "Reading %x from remote scratch pad index %d\n", *val, idx);
409
410         return 0;
411 }
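/*
 * Illustrative scratchpad handshake (index and value are hypothetical): one
 * side publishes a value and notifies its peer, and the peer then reads the
 * same scratchpad index back with its own ntb_read_remote_spad().
 *
 *	(local)	ntb_write_remote_spad(ndev, 0, my_value);
 *	(local)	ntb_ring_doorbell(ndev, 0);
 *	(peer)	ntb_read_remote_spad(peer_ndev, 0, &val);
 */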
412
413 /**
414  * ntb_get_mw_base() - get addr for the NTB memory window
415  * @ndev: pointer to ntb_device instance
416  * @mw: memory window number
417  *
418  * This function provides the base address of the memory window specified.
419  *
420  * RETURNS: address, or NULL on error.
421  */
422 resource_size_t ntb_get_mw_base(struct ntb_device *ndev, unsigned int mw)
423 {
424         if (mw >= ntb_max_mw(ndev))
425                 return 0;
426
427         return pci_resource_start(ndev->pdev, MW_TO_BAR(mw));
428 }
429
430 /**
431  * ntb_get_mw_vbase() - get virtual addr for the NTB memory window
432  * @ndev: pointer to ntb_device instance
433  * @mw: memory window number
434  *
435  * This function provides the base virtual address of the memory window
436  * specified.
437  *
438  * RETURNS: pointer to virtual address, or NULL on error.
439  */
440 void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
441 {
442         if (mw >= ntb_max_mw(ndev))
443                 return NULL;
444
445         return ndev->mw[mw].vbase;
446 }
447
448 /**
449  * ntb_get_mw_size() - return size of NTB memory window
450  * @ndev: pointer to ntb_device instance
451  * @mw: memory window number
452  *
453  * This function provides the physical size of the memory window specified
454  *
455  * RETURNS: the size of the memory window or zero on error
456  */
457 u64 ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
458 {
459         if (mw >= ntb_max_mw(ndev))
460                 return 0;
461
462         return ndev->mw[mw].bar_sz;
463 }
464
465 /**
466  * ntb_set_mw_addr - set the memory window address
467  * @ndev: pointer to ntb_device instance
468  * @mw: memory window number
469  * @addr: base address for data
470  *
471  * This function sets the base physical address of the memory window.  This
472  * memory address is where data from the remote system will be transferred into
473  * or out of depending on how the transport is configured.
474  */
475 void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
476 {
477         if (mw >= ntb_max_mw(ndev))
478                 return;
479
480         dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
481                 MW_TO_BAR(mw));
482
483         ndev->mw[mw].phys_addr = addr;
484
485         switch (MW_TO_BAR(mw)) {
486         case NTB_BAR_23:
487                 writeq(addr, ndev->reg_ofs.bar2_xlat);
488                 break;
489         case NTB_BAR_45:
490                 writeq(addr, ndev->reg_ofs.bar4_xlat);
491                 break;
492         }
493 }
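/*
 * Illustrative inbound window setup (a sketch with hypothetical names): a
 * transport typically allocates a DMA-able buffer no larger than the window,
 * points the inbound translation at it with ntb_set_mw_addr(), and then
 * advertises the size to the peer through a scratchpad.
 *
 *	size = ntb_get_mw_size(ndev, mw);
 *	buf = dma_alloc_coherent(&pdev->dev, size, &dma_addr, GFP_KERNEL);
 *	ntb_set_mw_addr(ndev, mw, dma_addr);
 *	ntb_write_remote_spad(ndev, MY_SPAD_MW_SZ, (u32)size);
 */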
494
495 /**
496  * ntb_ring_doorbell() - Set the doorbell on the secondary/external side
497  * @ndev: pointer to ntb_device instance
498  * @db: doorbell to ring
499  *
500  * This function allows triggering of a doorbell on the secondary/external
501  * side that will initiate an interrupt on the remote host
504  */
505 void ntb_ring_doorbell(struct ntb_device *ndev, unsigned int db)
506 {
507         dev_dbg(&ndev->pdev->dev, "%s: ringing doorbell %d\n", __func__, db);
508
509         if (ndev->hw_type == BWD_HW)
510                 writeq((u64) 1 << db, ndev->reg_ofs.rdb);
511         else
512                 writew(((1 << ndev->bits_per_vector) - 1) <<
513                        (db * ndev->bits_per_vector), ndev->reg_ofs.rdb);
514 }
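/*
 * Illustrative outbound data path (offset, buffer and length are
 * hypothetical): write into the peer-mapped window, then ring a doorbell so
 * the peer's registered callback for that index runs and consumes the data.
 *
 *	void __iomem *dst = ntb_get_mw_vbase(ndev, mw);
 *	memcpy_toio(dst + off, buf, len);
 *	ntb_ring_doorbell(ndev, db_num);
 */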
515
516 static void bwd_recover_link(struct ntb_device *ndev)
517 {
518         u32 status;
519
520         /* Driver resets the NTB ModPhy lanes - magic! */
521         writeb(0xe0, ndev->reg_base + BWD_MODPHY_PCSREG6);
522         writeb(0x40, ndev->reg_base + BWD_MODPHY_PCSREG4);
523         writeb(0x60, ndev->reg_base + BWD_MODPHY_PCSREG4);
524         writeb(0x60, ndev->reg_base + BWD_MODPHY_PCSREG6);
525
526         /* Driver waits 100ms to allow the NTB ModPhy to settle */
527         msleep(100);
528
529         /* Clear AER Errors, write to clear */
530         status = readl(ndev->reg_base + BWD_ERRCORSTS_OFFSET);
531         dev_dbg(&ndev->pdev->dev, "ERRCORSTS = %x\n", status);
532         status &= PCI_ERR_COR_REP_ROLL;
533         writel(status, ndev->reg_base + BWD_ERRCORSTS_OFFSET);
534
535         /* Clear unexpected electrical idle event in LTSSM, write to clear */
536         status = readl(ndev->reg_base + BWD_LTSSMERRSTS0_OFFSET);
537         dev_dbg(&ndev->pdev->dev, "LTSSMERRSTS0 = %x\n", status);
538         status |= BWD_LTSSMERRSTS0_UNEXPECTEDEI;
539         writel(status, ndev->reg_base + BWD_LTSSMERRSTS0_OFFSET);
540
541         /* Clear DeSkew Buffer error, write to clear */
542         status = readl(ndev->reg_base + BWD_DESKEWSTS_OFFSET);
543         dev_dbg(&ndev->pdev->dev, "DESKEWSTS = %x\n", status);
544         status |= BWD_DESKEWSTS_DBERR;
545         writel(status, ndev->reg_base + BWD_DESKEWSTS_OFFSET);
546
547         status = readl(ndev->reg_base + BWD_IBSTERRRCRVSTS0_OFFSET);
548         dev_dbg(&ndev->pdev->dev, "IBSTERRRCRVSTS0 = %x\n", status);
549         status &= BWD_IBIST_ERR_OFLOW;
550         writel(status, ndev->reg_base + BWD_IBSTERRRCRVSTS0_OFFSET);
551
552         /* Releases the NTB state machine to allow the link to retrain */
553         status = readl(ndev->reg_base + BWD_LTSSMSTATEJMP_OFFSET);
554         dev_dbg(&ndev->pdev->dev, "LTSSMSTATEJMP = %x\n", status);
555         status &= ~BWD_LTSSMSTATEJMP_FORCEDETECT;
556         writel(status, ndev->reg_base + BWD_LTSSMSTATEJMP_OFFSET);
557 }
558
559 static void ntb_link_event(struct ntb_device *ndev, int link_state)
560 {
561         unsigned int event;
562
563         if (ndev->link_status == link_state)
564                 return;
565
566         if (link_state == NTB_LINK_UP) {
567                 u16 status;
568
569                 dev_info(&ndev->pdev->dev, "Link Up\n");
570                 ndev->link_status = NTB_LINK_UP;
571                 event = NTB_EVENT_HW_LINK_UP;
572
573                 if (is_ntb_atom(ndev) ||
574                     ndev->conn_type == NTB_CONN_TRANSPARENT)
575                         status = readw(ndev->reg_ofs.lnk_stat);
576                 else {
577                         int rc = pci_read_config_word(ndev->pdev,
578                                                       SNB_LINK_STATUS_OFFSET,
579                                                       &status);
580                         if (rc)
581                                 return;
582                 }
583
584                 ndev->link_width = (status & NTB_LINK_WIDTH_MASK) >> 4;
585                 ndev->link_speed = (status & NTB_LINK_SPEED_MASK);
586                 dev_info(&ndev->pdev->dev, "Link Width %d, Link Speed %d\n",
587                          ndev->link_width, ndev->link_speed);
588         } else {
589                 dev_info(&ndev->pdev->dev, "Link Down\n");
590                 ndev->link_status = NTB_LINK_DOWN;
591                 event = NTB_EVENT_HW_LINK_DOWN;
592                 /* Don't modify link width/speed, we need it in link recovery */
593         }
594
595         /* notify the upper layer if we have an event change */
596         if (ndev->event_cb)
597                 ndev->event_cb(ndev->ntb_transport, event);
598 }
599
600 static int ntb_link_status(struct ntb_device *ndev)
601 {
602         int link_state;
603
604         if (is_ntb_atom(ndev)) {
605                 u32 ntb_cntl;
606
607                 ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
608                 if (ntb_cntl & BWD_CNTL_LINK_DOWN)
609                         link_state = NTB_LINK_DOWN;
610                 else
611                         link_state = NTB_LINK_UP;
612         } else {
613                 u16 status;
614                 int rc;
615
616                 rc = pci_read_config_word(ndev->pdev, SNB_LINK_STATUS_OFFSET,
617                                           &status);
618                 if (rc)
619                         return rc;
620
621                 if (status & NTB_LINK_STATUS_ACTIVE)
622                         link_state = NTB_LINK_UP;
623                 else
624                         link_state = NTB_LINK_DOWN;
625         }
626
627         ntb_link_event(ndev, link_state);
628
629         return 0;
630 }
631
632 static void bwd_link_recovery(struct work_struct *work)
633 {
634         struct ntb_device *ndev = container_of(work, struct ntb_device,
635                                                lr_timer.work);
636         u32 status32;
637
638         bwd_recover_link(ndev);
639         /* There is a potential race between the 2 NTB devices recovering at the
640          * same time.  If the times are the same, the link will not recover and
641          * the driver will be stuck in this loop forever.  Add a random interval
642          * to the recovery time to prevent this race.
643          */
644         msleep(BWD_LINK_RECOVERY_TIME + prandom_u32() % BWD_LINK_RECOVERY_TIME);
645
646         status32 = readl(ndev->reg_base + BWD_LTSSMSTATEJMP_OFFSET);
647         if (status32 & BWD_LTSSMSTATEJMP_FORCEDETECT)
648                 goto retry;
649
650         status32 = readl(ndev->reg_base + BWD_IBSTERRRCRVSTS0_OFFSET);
651         if (status32 & BWD_IBIST_ERR_OFLOW)
652                 goto retry;
653
654         status32 = readl(ndev->reg_ofs.lnk_cntl);
655         if (!(status32 & BWD_CNTL_LINK_DOWN)) {
656                 unsigned char speed, width;
657                 u16 status16;
658
659                 status16 = readw(ndev->reg_ofs.lnk_stat);
660                 width = (status16 & NTB_LINK_WIDTH_MASK) >> 4;
661                 speed = (status16 & NTB_LINK_SPEED_MASK);
662                 if (ndev->link_width != width || ndev->link_speed != speed)
663                         goto retry;
664         }
665
666         schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
667         return;
668
669 retry:
670         schedule_delayed_work(&ndev->lr_timer, NTB_HB_TIMEOUT);
671 }
672
673 /* BWD doesn't have link status interrupt, poll on that platform */
674 static void bwd_link_poll(struct work_struct *work)
675 {
676         struct ntb_device *ndev = container_of(work, struct ntb_device,
677                                                hb_timer.work);
678         unsigned long ts = jiffies;
679
680         /* If we haven't gotten an interrupt in a while, check the BWD link
681          * status bit
682          */
683         if (ts > ndev->last_ts + NTB_HB_TIMEOUT) {
684                 int rc = ntb_link_status(ndev);
685                 if (rc)
686                         dev_err(&ndev->pdev->dev,
687                                 "Error determining link status\n");
688
689                 /* Check to see if a link error is the cause of the link down */
690                 if (ndev->link_status == NTB_LINK_DOWN) {
691                         u32 status32 = readl(ndev->reg_base +
692                                              BWD_LTSSMSTATEJMP_OFFSET);
693                         if (status32 & BWD_LTSSMSTATEJMP_FORCEDETECT) {
694                                 schedule_delayed_work(&ndev->lr_timer, 0);
695                                 return;
696                         }
697                 }
698         }
699
700         schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
701 }
702
703 static int ntb_xeon_setup(struct ntb_device *ndev)
704 {
705         switch (ndev->conn_type) {
706         case NTB_CONN_B2B:
707                 ndev->reg_ofs.ldb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
708                 ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET;
709                 ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET;
710                 ndev->reg_ofs.bar2_xlat = ndev->reg_base + SNB_SBAR2XLAT_OFFSET;
711                 ndev->reg_ofs.bar4_xlat = ndev->reg_base + SNB_SBAR4XLAT_OFFSET;
712                 ndev->limits.max_spads = SNB_MAX_B2B_SPADS;
713
714                 /* There is a Xeon hardware errata related to writes to
715                  * SDOORBELL or B2BDOORBELL in conjunction with inbound access
716                  * to NTB MMIO Space, which may hang the system.  To work around
717                  * this use the second memory window to access the interrupt and
718                  * scratch pad registers on the remote system.
719                  */
720                 if (xeon_errata_workaround) {
721                         if (!ndev->mw[1].bar_sz)
722                                 return -EINVAL;
723
724                         ndev->limits.max_mw = SNB_ERRATA_MAX_MW;
725                         ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
726                         ndev->reg_ofs.spad_write = ndev->mw[1].vbase +
727                                                    SNB_SPAD_OFFSET;
728                         ndev->reg_ofs.rdb = ndev->mw[1].vbase +
729                                             SNB_PDOORBELL_OFFSET;
730
731                         /* Set the Limit register to 4k, the minimum size, to
732                          * prevent an illegal access
733                          */
734                         writeq(ndev->mw[1].bar_sz + 0x1000, ndev->reg_base +
735                                SNB_PBAR4LMT_OFFSET);
736                         /* HW errata on the Limit registers.  They can only be
737                          * written when the base register is 4GB aligned and
738                          * < 32bit.  This should already be the case based on
739                          * the driver defaults, but write the Limit registers
740                          * first just in case.
741                          */
742                 } else {
743                         ndev->limits.max_mw = SNB_MAX_MW;
744
745                         /* HW Errata on bit 14 of b2bdoorbell register.  Writes
746                          * will not be mirrored to the remote system.  Shrink
747                          * the number of bits by one, since bit 14 is the last
748                          * bit.
749                          */
750                         ndev->limits.max_db_bits = SNB_MAX_DB_BITS - 1;
751                         ndev->reg_ofs.spad_write = ndev->reg_base +
752                                                    SNB_B2B_SPAD_OFFSET;
753                         ndev->reg_ofs.rdb = ndev->reg_base +
754                                             SNB_B2B_DOORBELL_OFFSET;
755
756                         /* Disable the Limit register, just in case it is set to
757                          * something silly
758                          */
759                         writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET);
760                         /* HW errata on the Limit registers.  They can only be
761                          * written when the base register is 4GB aligned and
762                          * < 32bit.  This should already be the case based on
763                          * the driver defaults, but write the Limit registers
764                          * first just in case.
765                          */
766                 }
767
768                 /* The Xeon errata workaround requires setting SBAR Base
769                  * addresses to known values, so that the PBAR XLAT can be
770                  * pointed at SBAR0 of the remote system.
771                  */
772                 if (ndev->dev_type == NTB_DEV_USD) {
773                         writeq(SNB_MBAR23_DSD_ADDR, ndev->reg_base +
774                                SNB_PBAR2XLAT_OFFSET);
775                         if (xeon_errata_workaround)
776                                 writeq(SNB_MBAR01_DSD_ADDR, ndev->reg_base +
777                                        SNB_PBAR4XLAT_OFFSET);
778                         else {
779                                 writeq(SNB_MBAR45_DSD_ADDR, ndev->reg_base +
780                                        SNB_PBAR4XLAT_OFFSET);
781                                 /* B2B_XLAT_OFFSET is a 64bit register, but can
782                                  * only take 32bit writes
783                                  */
784                                 writel(SNB_MBAR01_DSD_ADDR & 0xffffffff,
785                                        ndev->reg_base + SNB_B2B_XLAT_OFFSETL);
786                                 writel(SNB_MBAR01_DSD_ADDR >> 32,
787                                        ndev->reg_base + SNB_B2B_XLAT_OFFSETU);
788                         }
789
790                         writeq(SNB_MBAR01_USD_ADDR, ndev->reg_base +
791                                SNB_SBAR0BASE_OFFSET);
792                         writeq(SNB_MBAR23_USD_ADDR, ndev->reg_base +
793                                SNB_SBAR2BASE_OFFSET);
794                         writeq(SNB_MBAR45_USD_ADDR, ndev->reg_base +
795                                SNB_SBAR4BASE_OFFSET);
796                 } else {
797                         writeq(SNB_MBAR23_USD_ADDR, ndev->reg_base +
798                                SNB_PBAR2XLAT_OFFSET);
799                         if (xeon_errata_workaround)
800                                 writeq(SNB_MBAR01_USD_ADDR, ndev->reg_base +
801                                        SNB_PBAR4XLAT_OFFSET);
802                         else {
803                                 writeq(SNB_MBAR45_USD_ADDR, ndev->reg_base +
804                                        SNB_PBAR4XLAT_OFFSET);
805                                 /* B2B_XLAT_OFFSET is a 64bit register, but can
806                                  * only take 32bit writes
807                                  */
808                                 writel(SNB_MBAR01_USD_ADDR & 0xffffffff,
809                                        ndev->reg_base + SNB_B2B_XLAT_OFFSETL);
810                                 writel(SNB_MBAR01_USD_ADDR >> 32,
811                                        ndev->reg_base + SNB_B2B_XLAT_OFFSETU);
812                         }
813                         writeq(SNB_MBAR01_DSD_ADDR, ndev->reg_base +
814                                SNB_SBAR0BASE_OFFSET);
815                         writeq(SNB_MBAR23_DSD_ADDR, ndev->reg_base +
816                                SNB_SBAR2BASE_OFFSET);
817                         writeq(SNB_MBAR45_DSD_ADDR, ndev->reg_base +
818                                SNB_SBAR4BASE_OFFSET);
819                 }
820                 break;
821         case NTB_CONN_RP:
822                 if (xeon_errata_workaround) {
823                         dev_err(&ndev->pdev->dev,
824                                 "NTB-RP disabled due to hardware errata.  To disregard this warning and potentially lock-up the system, add the parameter 'xeon_errata_workaround=0'.\n");
825                         return -EINVAL;
826                 }
827
828                 /* Scratch pads need to have exclusive access from the primary
829                  * or secondary side.  Halve the num spads so that each side can
830                  * have an equal amount.
831                  */
832                 ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2;
833                 ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
834                 /* Note: The SDOORBELL is the cause of the errata.  You REALLY
835                  * don't want to touch it.
836                  */
837                 ndev->reg_ofs.rdb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
838                 ndev->reg_ofs.ldb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
839                 ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET;
840                 /* Offset the start of the spads to correspond to whether it is
841                  * primary or secondary
842                  */
843                 ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET +
844                                            ndev->limits.max_spads * 4;
845                 ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET;
846                 ndev->reg_ofs.bar2_xlat = ndev->reg_base + SNB_SBAR2XLAT_OFFSET;
847                 ndev->reg_ofs.bar4_xlat = ndev->reg_base + SNB_SBAR4XLAT_OFFSET;
848                 ndev->limits.max_mw = SNB_MAX_MW;
849                 break;
850         case NTB_CONN_TRANSPARENT:
851                 /* Scratch pads need to have exclusive access from the primary
852                  * or secondary side.  Halve the num spads so that each side can
853                  * have an equal amount.
854                  */
855                 ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2;
856                 ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
857                 ndev->reg_ofs.rdb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
858                 ndev->reg_ofs.ldb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
859                 ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_SDBMSK_OFFSET;
860                 ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET;
861                 /* Offset the start of the spads to correspond to whether it is
862                  * primary or secondary
863                  */
864                 ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET +
865                                           ndev->limits.max_spads * 4;
866                 ndev->reg_ofs.bar2_xlat = ndev->reg_base + SNB_PBAR2XLAT_OFFSET;
867                 ndev->reg_ofs.bar4_xlat = ndev->reg_base + SNB_PBAR4XLAT_OFFSET;
868
869                 ndev->limits.max_mw = SNB_MAX_MW;
870                 break;
871         default:
872                 /*
873                  * We should never hit this; the detect function should have
874                  * taken care of everything.
875                  */
876                 return -EINVAL;
877         }
878
879         ndev->reg_ofs.lnk_cntl = ndev->reg_base + SNB_NTBCNTL_OFFSET;
880         ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_SLINK_STATUS_OFFSET;
881         ndev->reg_ofs.spci_cmd = ndev->reg_base + SNB_PCICMD_OFFSET;
882
883         ndev->limits.msix_cnt = SNB_MSIX_CNT;
884         ndev->bits_per_vector = SNB_DB_BITS_PER_VEC;
885
886         return 0;
887 }
888
889 static int ntb_bwd_setup(struct ntb_device *ndev)
890 {
891         int rc;
892         u32 val;
893
894         ndev->hw_type = BWD_HW;
895
896         rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &val);
897         if (rc)
898                 return rc;
899
900         switch ((val & BWD_PPD_CONN_TYPE) >> 8) {
901         case NTB_CONN_B2B:
902                 ndev->conn_type = NTB_CONN_B2B;
903                 break;
904         case NTB_CONN_RP:
905         default:
906                 dev_err(&ndev->pdev->dev, "Unsupported NTB configuration\n");
907                 return -EINVAL;
908         }
909
910         if (val & BWD_PPD_DEV_TYPE)
911                 ndev->dev_type = NTB_DEV_DSD;
912         else
913                 ndev->dev_type = NTB_DEV_USD;
914
915         /* Initiate PCI-E link training */
916         rc = pci_write_config_dword(ndev->pdev, NTB_PPD_OFFSET,
917                                     val | BWD_PPD_INIT_LINK);
918         if (rc)
919                 return rc;
920
921         ndev->reg_ofs.ldb = ndev->reg_base + BWD_PDOORBELL_OFFSET;
922         ndev->reg_ofs.ldb_mask = ndev->reg_base + BWD_PDBMSK_OFFSET;
923         ndev->reg_ofs.rdb = ndev->reg_base + BWD_B2B_DOORBELL_OFFSET;
924         ndev->reg_ofs.bar2_xlat = ndev->reg_base + BWD_SBAR2XLAT_OFFSET;
925         ndev->reg_ofs.bar4_xlat = ndev->reg_base + BWD_SBAR4XLAT_OFFSET;
926         ndev->reg_ofs.lnk_cntl = ndev->reg_base + BWD_NTBCNTL_OFFSET;
927         ndev->reg_ofs.lnk_stat = ndev->reg_base + BWD_LINK_STATUS_OFFSET;
928         ndev->reg_ofs.spad_read = ndev->reg_base + BWD_SPAD_OFFSET;
929         ndev->reg_ofs.spad_write = ndev->reg_base + BWD_B2B_SPAD_OFFSET;
930         ndev->reg_ofs.spci_cmd = ndev->reg_base + BWD_PCICMD_OFFSET;
931         ndev->limits.max_mw = BWD_MAX_MW;
932         ndev->limits.max_spads = BWD_MAX_SPADS;
933         ndev->limits.max_db_bits = BWD_MAX_DB_BITS;
934         ndev->limits.msix_cnt = BWD_MSIX_CNT;
935         ndev->bits_per_vector = BWD_DB_BITS_PER_VEC;
936
937         /* Since bwd doesn't have a link interrupt, set up a poll timer */
938         INIT_DELAYED_WORK(&ndev->hb_timer, bwd_link_poll);
939         INIT_DELAYED_WORK(&ndev->lr_timer, bwd_link_recovery);
940         schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
941
942         return 0;
943 }
944
945 static int ntb_device_setup(struct ntb_device *ndev)
946 {
947         int rc;
948
949         if (is_ntb_xeon(ndev))
950                 rc = ntb_xeon_setup(ndev);
951         else if (is_ntb_atom(ndev))
952                 rc = ntb_bwd_setup(ndev);
953         else
954                 rc = -ENODEV;
955
956         if (rc)
957                 return rc;
958
959         if (ndev->conn_type == NTB_CONN_B2B)
960                 /* Enable Bus Master and Memory Space on the secondary side */
961                 writew(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
962                        ndev->reg_ofs.spci_cmd);
963
964         return 0;
965 }
966
967 static void ntb_device_free(struct ntb_device *ndev)
968 {
969         if (is_ntb_atom(ndev)) {
970                 cancel_delayed_work_sync(&ndev->hb_timer);
971                 cancel_delayed_work_sync(&ndev->lr_timer);
972         }
973 }
974
975 static irqreturn_t bwd_callback_msix_irq(int irq, void *data)
976 {
977         struct ntb_db_cb *db_cb = data;
978         struct ntb_device *ndev = db_cb->ndev;
979         unsigned long mask;
980
981         dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
982                 db_cb->db_num);
983
984         mask = readw(ndev->reg_ofs.ldb_mask);
985         set_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
986         writew(mask, ndev->reg_ofs.ldb_mask);
987
988         tasklet_schedule(&db_cb->irq_work);
989
990         /* No need to check for the specific HB irq, any interrupt means
991          * we're connected.
992          */
993         ndev->last_ts = jiffies;
994
995         writeq((u64) 1 << db_cb->db_num, ndev->reg_ofs.ldb);
996
997         return IRQ_HANDLED;
998 }
999
1000 static irqreturn_t xeon_callback_msix_irq(int irq, void *data)
1001 {
1002         struct ntb_db_cb *db_cb = data;
1003         struct ntb_device *ndev = db_cb->ndev;
1004         unsigned long mask;
1005
1006         dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
1007                 db_cb->db_num);
1008
1009         mask = readw(ndev->reg_ofs.ldb_mask);
1010         set_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
1011         writew(mask, ndev->reg_ofs.ldb_mask);
1012
1013         tasklet_schedule(&db_cb->irq_work);
1014
1015         /* On Sandybridge, there are 16 bits in the interrupt register
1016          * but only 4 vectors.  So, 5 bits are assigned to the first 3
1017          * vectors, with the 4th having a single bit for link
1018          * interrupts.
1019          */
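                /* A worked example of that layout (assuming SNB_DB_BITS_PER_VEC
                 * is 5, per the comment above): vector 0 covers doorbell bits
                 * 0-4, vector 1 bits 5-9, vector 2 bits 10-14, and vector 3
                 * only bit 15, the link bit.
                 */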
1020         writew(((1 << ndev->bits_per_vector) - 1) <<
1021                (db_cb->db_num * ndev->bits_per_vector), ndev->reg_ofs.ldb);
1022
1023         return IRQ_HANDLED;
1024 }
1025
1026 /* Since we do not have a HW doorbell in BWD, this is only used in JF/JT */
1027 static irqreturn_t xeon_event_msix_irq(int irq, void *dev)
1028 {
1029         struct ntb_device *ndev = dev;
1030         int rc;
1031
1032         dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for Events\n", irq);
1033
1034         rc = ntb_link_status(ndev);
1035         if (rc)
1036                 dev_err(&ndev->pdev->dev, "Error determining link status\n");
1037
1038         /* bit 15 is always the link bit */
1039         writew(1 << SNB_LINK_DB, ndev->reg_ofs.ldb);
1040
1041         return IRQ_HANDLED;
1042 }
1043
1044 static irqreturn_t ntb_interrupt(int irq, void *dev)
1045 {
1046         struct ntb_device *ndev = dev;
1047         unsigned int i = 0;
1048
1049         if (is_ntb_atom(ndev)) {
1050                 u64 ldb = readq(ndev->reg_ofs.ldb);
1051
1052                 dev_dbg(&ndev->pdev->dev, "irq %d - ldb = %Lx\n", irq, ldb);
1053
1054                 while (ldb) {
1055                         i = __ffs(ldb);
1056                         ldb &= ldb - 1;
1057                         bwd_callback_msix_irq(irq, &ndev->db_cb[i]);
1058                 }
1059         } else {
1060                 u16 ldb = readw(ndev->reg_ofs.ldb);
1061
1062                 dev_dbg(&ndev->pdev->dev, "irq %d - ldb = %x\n", irq, ldb);
1063
1064                 if (ldb & SNB_DB_HW_LINK) {
1065                         xeon_event_msix_irq(irq, dev);
1066                         ldb &= ~SNB_DB_HW_LINK;
1067                 }
1068
1069                 while (ldb) {
1070                         i = __ffs(ldb);
1071                         ldb &= ldb - 1;
1072                         xeon_callback_msix_irq(irq, &ndev->db_cb[i]);
1073                 }
1074         }
1075
1076         return IRQ_HANDLED;
1077 }
1078
1079 static int ntb_setup_snb_msix(struct ntb_device *ndev, int msix_entries)
1080 {
1081         struct pci_dev *pdev = ndev->pdev;
1082         struct msix_entry *msix;
1083         int rc, i;
1084
1085         if (msix_entries < ndev->limits.msix_cnt)
1086                 return -ENOSPC;
1087
1088         rc = pci_enable_msix_exact(pdev, ndev->msix_entries, msix_entries);
1089         if (rc < 0)
1090                 return rc;
1091
1092         for (i = 0; i < msix_entries; i++) {
1093                 msix = &ndev->msix_entries[i];
1094                 WARN_ON(!msix->vector);
1095
1096                 if (i == msix_entries - 1) {
1097                         rc = request_irq(msix->vector,
1098                                          xeon_event_msix_irq, 0,
1099                                          "ntb-event-msix", ndev);
1100                         if (rc)
1101                                 goto err;
1102                 } else {
1103                         rc = request_irq(msix->vector,
1104                                          xeon_callback_msix_irq, 0,
1105                                          "ntb-callback-msix",
1106                                          &ndev->db_cb[i]);
1107                         if (rc)
1108                                 goto err;
1109                 }
1110         }
1111
1112         ndev->num_msix = msix_entries;
1113         ndev->max_cbs = msix_entries - 1;
1114
1115         return 0;
1116
1117 err:
1118         while (--i >= 0) {
1119                 /* Code never reaches here for the last entry (the event vector) */
1120                 msix = &ndev->msix_entries[i];
1121                 free_irq(msix->vector, &ndev->db_cb[i]);
1122         }
1123
1124         pci_disable_msix(pdev);
1125         ndev->num_msix = 0;
1126
1127         return rc;
1128 }
1129
1130 static int ntb_setup_bwd_msix(struct ntb_device *ndev, int msix_entries)
1131 {
1132         struct pci_dev *pdev = ndev->pdev;
1133         struct msix_entry *msix;
1134         int rc, i;
1135
1136         msix_entries = pci_enable_msix_range(pdev, ndev->msix_entries,
1137                                              1, msix_entries);
1138         if (msix_entries < 0)
1139                 return msix_entries;
1140
1141         for (i = 0; i < msix_entries; i++) {
1142                 msix = &ndev->msix_entries[i];
1143                 WARN_ON(!msix->vector);
1144
1145                 rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
1146                                  "ntb-callback-msix", &ndev->db_cb[i]);
1147                 if (rc)
1148                         goto err;
1149         }
1150
1151         ndev->num_msix = msix_entries;
1152         ndev->max_cbs = msix_entries;
1153
1154         return 0;
1155
1156 err:
1157         while (--i >= 0)
1158                 free_irq(msix->vector, &ndev->db_cb[i]);
1159
1160         pci_disable_msix(pdev);
1161         ndev->num_msix = 0;
1162
1163         return rc;
1164 }
1165
1166 static int ntb_setup_msix(struct ntb_device *ndev)
1167 {
1168         struct pci_dev *pdev = ndev->pdev;
1169         int msix_entries;
1170         int rc, i;
1171
1172         msix_entries = pci_msix_vec_count(pdev);
1173         if (msix_entries < 0) {
1174                 rc = msix_entries;
1175                 goto err;
1176         } else if (msix_entries > ndev->limits.msix_cnt) {
1177                 rc = -EINVAL;
1178                 goto err;
1179         }
1180
1181         ndev->msix_entries = kmalloc(sizeof(struct msix_entry) * msix_entries,
1182                                      GFP_KERNEL);
1183         if (!ndev->msix_entries) {
1184                 rc = -ENOMEM;
1185                 goto err;
1186         }
1187
1188         for (i = 0; i < msix_entries; i++)
1189                 ndev->msix_entries[i].entry = i;
1190
1191         if (is_ntb_atom(ndev))
1192                 rc = ntb_setup_bwd_msix(ndev, msix_entries);
1193         else
1194                 rc = ntb_setup_snb_msix(ndev, msix_entries);
1195         if (rc)
1196                 goto err1;
1197
1198         return 0;
1199
1200 err1:
1201         kfree(ndev->msix_entries);
1202 err:
1203         dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
1204         return rc;
1205 }
1206
1207 static int ntb_setup_msi(struct ntb_device *ndev)
1208 {
1209         struct pci_dev *pdev = ndev->pdev;
1210         int rc;
1211
1212         rc = pci_enable_msi(pdev);
1213         if (rc)
1214                 return rc;
1215
1216         rc = request_irq(pdev->irq, ntb_interrupt, 0, "ntb-msi", ndev);
1217         if (rc) {
1218                 pci_disable_msi(pdev);
1219                 dev_err(&pdev->dev, "Error allocating MSI interrupt\n");
1220                 return rc;
1221         }
1222
1223         return 0;
1224 }
1225
1226 static int ntb_setup_intx(struct ntb_device *ndev)
1227 {
1228         struct pci_dev *pdev = ndev->pdev;
1229         int rc;
1230
1231         pci_msi_off(pdev);
1232
1233         /* Verify intx is enabled */
1234         pci_intx(pdev, 1);
1235
1236         rc = request_irq(pdev->irq, ntb_interrupt, IRQF_SHARED, "ntb-intx",
1237                          ndev);
1238         if (rc)
1239                 return rc;
1240
1241         return 0;
1242 }
1243
1244 static int ntb_setup_interrupts(struct ntb_device *ndev)
1245 {
1246         int rc;
1247
1248         /* On BWD, disable all interrupts.  On SNB, disable all but Link
1249          * Interrupt.  The rest will be unmasked as callbacks are registered.
1250          */
1251         if (is_ntb_atom(ndev))
1252                 writeq(~0, ndev->reg_ofs.ldb_mask);
1253         else {
1254                 u16 var = 1 << SNB_LINK_DB;
1255                 writew(~var, ndev->reg_ofs.ldb_mask);
1256         }
1257
1258         rc = ntb_setup_msix(ndev);
1259         if (!rc)
1260                 goto done;
1261
1262         ndev->bits_per_vector = 1;
1263         ndev->max_cbs = ndev->limits.max_db_bits;
1264
1265         rc = ntb_setup_msi(ndev);
1266         if (!rc)
1267                 goto done;
1268
1269         rc = ntb_setup_intx(ndev);
1270         if (rc) {
1271                 dev_err(&ndev->pdev->dev, "no usable interrupts\n");
1272                 return rc;
1273         }
1274
1275 done:
1276         return 0;
1277 }
1278
1279 static void ntb_free_interrupts(struct ntb_device *ndev)
1280 {
1281         struct pci_dev *pdev = ndev->pdev;
1282
1283         /* mask interrupts */
1284         if (is_ntb_atom(ndev))
1285                 writeq(~0, ndev->reg_ofs.ldb_mask);
1286         else
1287                 writew(~0, ndev->reg_ofs.ldb_mask);
1288
1289         if (ndev->num_msix) {
1290                 struct msix_entry *msix;
1291                 u32 i;
1292
1293                 for (i = 0; i < ndev->num_msix; i++) {
1294                         msix = &ndev->msix_entries[i];
1295                         if (is_ntb_xeon(ndev) && i == ndev->num_msix - 1)
1296                                 free_irq(msix->vector, ndev);
1297                         else
1298                                 free_irq(msix->vector, &ndev->db_cb[i]);
1299                 }
1300                 pci_disable_msix(pdev);
1301                 kfree(ndev->msix_entries);
1302         } else {
1303                 free_irq(pdev->irq, ndev);
1304
1305                 if (pci_dev_msi_enabled(pdev))
1306                         pci_disable_msi(pdev);
1307         }
1308 }
1309
1310 static int ntb_create_callbacks(struct ntb_device *ndev)
1311 {
1312         int i;
1313
1314         /* Chicken-egg issue.  We won't know how many callbacks are necessary
1315          * until we see how many MSI-X vectors we get, but these pointers need
1316          * to be passed into the MSI-X register function.  So, we allocate the
1317          * max, knowing that they might not all be used, to work around this.
1318          */
1319         ndev->db_cb = kcalloc(ndev->limits.max_db_bits,
1320                               sizeof(struct ntb_db_cb),
1321                               GFP_KERNEL);
1322         if (!ndev->db_cb)
1323                 return -ENOMEM;
1324
1325         for (i = 0; i < ndev->limits.max_db_bits; i++) {
1326                 ndev->db_cb[i].db_num = i;
1327                 ndev->db_cb[i].ndev = ndev;
1328         }
1329
1330         return 0;
1331 }
1332
1333 static void ntb_free_callbacks(struct ntb_device *ndev)
1334 {
1335         int i;
1336
1337         for (i = 0; i < ndev->limits.max_db_bits; i++)
1338                 ntb_unregister_db_callback(ndev, i);
1339
1340         kfree(ndev->db_cb);
1341 }
1342
1343 static ssize_t ntb_debugfs_read(struct file *filp, char __user *ubuf,
1344                                 size_t count, loff_t *offp)
1345 {
1346         struct ntb_device *ndev;
1347         char *buf;
1348         ssize_t ret, offset, out_count;
1349
1350         out_count = 500;
1351
1352         buf = kmalloc(out_count, GFP_KERNEL);
1353         if (!buf)
1354                 return -ENOMEM;
1355
1356         ndev = filp->private_data;
1357         offset = 0;
1358         offset += snprintf(buf + offset, out_count - offset,
1359                            "NTB Device Information:\n");
1360         offset += snprintf(buf + offset, out_count - offset,
1361                            "Connection Type - \t\t%s\n",
1362                            ndev->conn_type == NTB_CONN_TRANSPARENT ?
1363                            "Transparent" : (ndev->conn_type == NTB_CONN_B2B) ?
1364                            "Back to back" : "Root Port");
1365         offset += snprintf(buf + offset, out_count - offset,
1366                            "Device Type - \t\t\t%s\n",
1367                            ndev->dev_type == NTB_DEV_USD ?
1368                            "DSD/USP" : "USD/DSP");
1369         offset += snprintf(buf + offset, out_count - offset,
1370                            "Max Number of Callbacks - \t%u\n",
1371                            ntb_max_cbs(ndev));
1372         offset += snprintf(buf + offset, out_count - offset,
1373                            "Link Status - \t\t\t%s\n",
1374                            ntb_hw_link_status(ndev) ? "Up" : "Down");
1375         if (ntb_hw_link_status(ndev)) {
1376                 offset += snprintf(buf + offset, out_count - offset,
1377                                    "Link Speed - \t\t\tPCI-E Gen %u\n",
1378                                    ndev->link_speed);
1379                 offset += snprintf(buf + offset, out_count - offset,
1380                                    "Link Width - \t\t\tx%u\n",
1381                                    ndev->link_width);
1382         }
1383
1384         if (is_ntb_xeon(ndev)) {
1385                 u32 status32;
1386                 u16 status16;
1387                 int rc;
1388
1389                 offset += snprintf(buf + offset, out_count - offset,
1390                                    "\nNTB Device Statistics:\n");
1391                 offset += snprintf(buf + offset, out_count - offset,
1392                                    "Upstream Memory Miss - \t%u\n",
1393                                    readw(ndev->reg_base +
1394                                          SNB_USMEMMISS_OFFSET));
1395
1396                 offset += snprintf(buf + offset, out_count - offset,
1397                                    "\nNTB Hardware Errors:\n");
1398
1399                 rc = pci_read_config_word(ndev->pdev, SNB_DEVSTS_OFFSET,
1400                                           &status16);
1401                 if (!rc)
1402                         offset += snprintf(buf + offset, out_count - offset,
1403                                            "DEVSTS - \t%#06x\n", status16);
1404
1405                 rc = pci_read_config_word(ndev->pdev, SNB_LINK_STATUS_OFFSET,
1406                                           &status16);
1407                 if (!rc)
1408                         offset += snprintf(buf + offset, out_count - offset,
1409                                            "LNKSTS - \t%#06x\n", status16);
1410
1411                 rc = pci_read_config_dword(ndev->pdev, SNB_UNCERRSTS_OFFSET,
1412                                            &status32);
1413                 if (!rc)
1414                         offset += snprintf(buf + offset, out_count - offset,
1415                                            "UNCERRSTS - \t%#010x\n", status32);
1416
1417                 rc = pci_read_config_dword(ndev->pdev, SNB_CORERRSTS_OFFSET,
1418                                            &status32);
1419                 if (!rc)
1420                         offset += snprintf(buf + offset, out_count - offset,
1421                                            "CORERRSTS - \t%#010x\n", status32);
1422         }
1423
1424         if (offset > out_count)
1425                 offset = out_count;
1426
1427         ret = simple_read_from_buffer(ubuf, count, offp, buf, offset);
1428         kfree(buf);
1429         return ret;
1430 }
1431
1432 static const struct file_operations ntb_debugfs_info = {
1433         .owner = THIS_MODULE,
1434         .open = simple_open,
1435         .read = ntb_debugfs_read,
1436 };
1437
1438 static void ntb_setup_debugfs(struct ntb_device *ndev)
1439 {
1440         if (!debugfs_initialized())
1441                 return;
1442
1443         if (!debugfs_dir)
1444                 debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
1445
1446         ndev->debugfs_dir = debugfs_create_dir(pci_name(ndev->pdev),
1447                                                debugfs_dir);
1448         if (ndev->debugfs_dir)
1449                 ndev->debugfs_info = debugfs_create_file("info", S_IRUSR,
1450                                                          ndev->debugfs_dir,
1451                                                          ndev,
1452                                                          &ntb_debugfs_info);
1453 }
1454
1455 static void ntb_free_debugfs(struct ntb_device *ndev)
1456 {
1457         debugfs_remove_recursive(ndev->debugfs_dir);
1458
1459         if (debugfs_dir && simple_empty(debugfs_dir)) {
1460                 debugfs_remove_recursive(debugfs_dir);
1461                 debugfs_dir = NULL;
1462         }
1463 }
1464
1465 static void ntb_hw_link_up(struct ntb_device *ndev)
1466 {
1467         if (ndev->conn_type == NTB_CONN_TRANSPARENT)
1468                 ntb_link_event(ndev, NTB_LINK_UP);
1469         else {
1470                 u32 ntb_cntl;
1471
1472                 /* Bring the NTB link up */
1473                 ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
1474                 ntb_cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
1475                 ntb_cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
1476                 ntb_cntl |= NTB_CNTL_P2S_BAR45_SNOOP | NTB_CNTL_S2P_BAR45_SNOOP;
1477                 writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
1478         }
1479 }
1480
1481 static void ntb_hw_link_down(struct ntb_device *ndev)
1482 {
1483         u32 ntb_cntl;
1484
1485         if (ndev->conn_type == NTB_CONN_TRANSPARENT) {
1486                 ntb_link_event(ndev, NTB_LINK_DOWN);
1487                 return;
1488         }
1489
1490         /* Bring NTB link down */
1491         ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
1492         ntb_cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
1493         ntb_cntl &= ~(NTB_CNTL_P2S_BAR45_SNOOP | NTB_CNTL_S2P_BAR45_SNOOP);
1494         ntb_cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
1495         writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
1496 }
1497
1498 static int ntb_xeon_detect(struct ntb_device *ndev)
1499 {
1500         int rc;
1501         u8 ppd;
1502
1503         ndev->hw_type = SNB_HW;
1504
1505         rc = pci_read_config_byte(ndev->pdev, NTB_PPD_OFFSET, &ppd);
1506         if (rc)
1507                 return -EIO;
1508
1509         if (ppd & SNB_PPD_DEV_TYPE)
1510                 ndev->dev_type = NTB_DEV_USD;
1511         else
1512                 ndev->dev_type = NTB_DEV_DSD;
1513
1514         switch (ppd & SNB_PPD_CONN_TYPE) {
1515         case NTB_CONN_B2B:
1516                 dev_info(&ndev->pdev->dev, "Conn Type = B2B\n");
1517                 ndev->conn_type = NTB_CONN_B2B;
1518                 break;
1519         case NTB_CONN_RP:
1520                 dev_info(&ndev->pdev->dev, "Conn Type = RP\n");
1521                 ndev->conn_type = NTB_CONN_RP;
1522                 break;
1523         case NTB_CONN_TRANSPARENT:
1524                 dev_info(&ndev->pdev->dev, "Conn Type = TRANSPARENT\n");
1525                 ndev->conn_type = NTB_CONN_TRANSPARENT;
1526                 /*
1527                  * Transparent mode defaults to USD/DSP. The HW cannot
1528                  * report the device type properly in this mode, as it has
1529                  * no knowledge of the NTB, so force the correct value here.
1530                  */
1531                 ndev->dev_type = NTB_DEV_USD;
1532                 break;
1533         default:
1534                 dev_err(&ndev->pdev->dev, "Unknown PPD %x\n", ppd);
1535                 return -ENODEV;
1536         }
1537
1538         return 0;
1539 }
1540
1541 static int ntb_atom_detect(struct ntb_device *ndev)
1542 {
1543         int rc;
1544         u32 ppd;
1545
1546         ndev->hw_type = BWD_HW;
1547
1548         rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &ppd);
1549         if (rc)
1550                 return rc;
1551
1552         switch ((ppd & BWD_PPD_CONN_TYPE) >> 8) {
1553         case NTB_CONN_B2B:
1554                 dev_info(&ndev->pdev->dev, "Conn Type = B2B\n");
1555                 ndev->conn_type = NTB_CONN_B2B;
1556                 break;
1557         case NTB_CONN_RP:
1558         default:
1559                 dev_err(&ndev->pdev->dev, "Unsupported NTB configuration\n");
1560                 return -EINVAL;
1561         }
1562
1563         if (ppd & BWD_PPD_DEV_TYPE)
1564                 ndev->dev_type = NTB_DEV_DSD;
1565         else
1566                 ndev->dev_type = NTB_DEV_USD;
1567
1568         return 0;
1569 }
1570
1571 static int ntb_device_detect(struct ntb_device *ndev)
1572 {
1573         int rc;
1574
1575         if (is_ntb_xeon(ndev))
1576                 rc = ntb_xeon_detect(ndev);
1577         else if (is_ntb_atom(ndev))
1578                 rc = ntb_atom_detect(ndev);
1579         else
1580                 rc = -ENODEV;
1581
1582         dev_info(&ndev->pdev->dev, "Device Type = %s\n",
1583                  ndev->dev_type == NTB_DEV_USD ? "USD/DSP" : "DSD/USP");
1584
1585         return rc;
1586 }
1587
1588 static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1589 {
1590         struct ntb_device *ndev;
1591         int rc, i;
1592
1593         ndev = kzalloc(sizeof(struct ntb_device), GFP_KERNEL);
1594         if (!ndev)
1595                 return -ENOMEM;
1596
1597         ndev->pdev = pdev;
1598         ndev->link_status = NTB_LINK_DOWN;
1599         pci_set_drvdata(pdev, ndev);
1600         ntb_setup_debugfs(ndev);
1601
1602         rc = pci_enable_device(pdev);
1603         if (rc)
1604                 goto err;
1605
1606         pci_set_master(ndev->pdev);
1607
1608         rc = ntb_device_detect(ndev);
1609         if (rc)
1610                 goto err1;
1611
1612         rc = pci_request_selected_regions(pdev, NTB_BAR_MASK, KBUILD_MODNAME);
1613         if (rc)
1614                 goto err1;
1615
1616         ndev->reg_base = pci_ioremap_bar(pdev, NTB_BAR_MMIO);
1617         if (!ndev->reg_base) {
1618                 dev_warn(&pdev->dev, "Cannot remap BAR 0\n");
1619                 rc = -EIO;
1620                 goto err2;
1621         }
1622
1623         for (i = 0; i < NTB_MAX_NUM_MW; i++) {
1624                 ndev->mw[i].bar_sz = pci_resource_len(pdev, MW_TO_BAR(i));
1625                 ndev->mw[i].vbase =
1626                     ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)),
1627                                ndev->mw[i].bar_sz);
1628                 dev_info(&pdev->dev, "MW %d size %llu\n", i,
1629                          (unsigned long long) ndev->mw[i].bar_sz);
1630                 if (!ndev->mw[i].vbase) {
1631                         dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
1632                                  MW_TO_BAR(i));
1633                         rc = -EIO;
1634                         goto err3;
1635                 }
1636         }
1637
1638         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1639         if (rc) {
1640                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1641                 if (rc)
1642                         goto err3;
1643
1644                 dev_warn(&pdev->dev, "Cannot DMA highmem\n");
1645         }
1646
1647         rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1648         if (rc) {
1649                 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1650                 if (rc)
1651                         goto err3;
1652
1653                 dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
1654         }
1655
1656         rc = ntb_device_setup(ndev);
1657         if (rc)
1658                 goto err3;
1659
1660         rc = ntb_create_callbacks(ndev);
1661         if (rc)
1662                 goto err4;
1663
1664         rc = ntb_setup_interrupts(ndev);
1665         if (rc)
1666                 goto err5;
1667
1668         /* The scratchpad registers keep their values across rmmod/insmod,
1669          * so clear them now
1670          */
1671         for (i = 0; i < ndev->limits.max_spads; i++) {
1672                 ntb_write_local_spad(ndev, i, 0);
1673                 ntb_write_remote_spad(ndev, i, 0);
1674         }
1675
1676         rc = ntb_transport_init(pdev);
1677         if (rc)
1678                 goto err6;
1679
1680         ntb_hw_link_up(ndev);
1681
1682         return 0;
1683
1684 err6:
1685         ntb_free_interrupts(ndev);
1686 err5:
1687         ntb_free_callbacks(ndev);
1688 err4:
1689         ntb_device_free(ndev);
1690 err3:
1691         for (i--; i >= 0; i--)
1692                 iounmap(ndev->mw[i].vbase);
1693         iounmap(ndev->reg_base);
1694 err2:
1695         pci_release_selected_regions(pdev, NTB_BAR_MASK);
1696 err1:
1697         pci_disable_device(pdev);
1698 err:
1699         ntb_free_debugfs(ndev);
1700         kfree(ndev);
1701
1702         dev_err(&pdev->dev, "Error loading %s module\n", KBUILD_MODNAME);
1703         return rc;
1704 }
1705
1706 static void ntb_pci_remove(struct pci_dev *pdev)
1707 {
1708         struct ntb_device *ndev = pci_get_drvdata(pdev);
1709         int i;
1710
1711         ntb_hw_link_down(ndev);
1712
1713         ntb_transport_free(ndev->ntb_transport);
1714
1715         ntb_free_interrupts(ndev);
1716         ntb_free_callbacks(ndev);
1717         ntb_device_free(ndev);
1718
1719         for (i = 0; i < NTB_MAX_NUM_MW; i++)
1720                 iounmap(ndev->mw[i].vbase);
1721
1722         iounmap(ndev->reg_base);
1723         pci_release_selected_regions(pdev, NTB_BAR_MASK);
1724         pci_disable_device(pdev);
1725         ntb_free_debugfs(ndev);
1726         kfree(ndev);
1727 }
1728
1729 static struct pci_driver ntb_pci_driver = {
1730         .name = KBUILD_MODNAME,
1731         .id_table = ntb_pci_tbl,
1732         .probe = ntb_pci_probe,
1733         .remove = ntb_pci_remove,
1734 };
1735
1736 module_pci_driver(ntb_pci_driver);