drivers/net/wireless/ath/wil6210/pmc.c
/*
 * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include "wmi.h"
#include "wil6210.h"
#include "txrx.h"
#include "pmc.h"

struct desc_alloc_info {
        dma_addr_t pa;
        void      *va;
};

static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
{
        return !!pmc->pring_va;
}

void wil_pmc_init(struct wil6210_priv *wil)
{
        memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
        mutex_init(&wil->pmc.lock);
}

/**
 * Allocate the physical ring (p-ring) and the required number of
 * descriptors of the required size.
 * Initialize the descriptors as required by the PMC DMA.
 * Each descriptor buffer is pre-filled dword by dword: the dword's
 * serial number in the LSW and the reserved marker
 * PCM_DATA_INVALID_DW_VAL in the MSW.
 */
void wil_pmc_alloc(struct wil6210_priv *wil,
                   int num_descriptors,
                   int descriptor_size)
{
        u32 i;
        struct pmc_ctx *pmc = &wil->pmc;
        struct device *dev = wil_to_dev(wil);
        struct wmi_pmc_cmd pmc_cmd = {0};

        mutex_lock(&pmc->lock);

        if (wil_is_pmc_allocated(pmc)) {
                /* sanity check */
                wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__);
                goto no_release_err;
        }

        pmc->num_descriptors = num_descriptors;
        pmc->descriptor_size = descriptor_size;

        wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n",
                     __func__, num_descriptors, descriptor_size);

        /* allocate descriptors info list in pmc context */
        pmc->descriptors = kcalloc(num_descriptors,
                                   sizeof(struct desc_alloc_info),
                                   GFP_KERNEL);
        if (!pmc->descriptors) {
                wil_err(wil, "%s: ERROR allocating pmc skb list\n", __func__);
                goto no_release_err;
        }

        wil_dbg_misc(wil,
                     "%s: allocated descriptors info list %p\n",
                     __func__, pmc->descriptors);

        /* Allocate pring buffer and descriptors.
         * pring_va should be aligned on its size rounded up to a power of 2.
         * This is guaranteed by dma_alloc_coherent.
         */
        pmc->pring_va = dma_alloc_coherent(dev,
                        sizeof(struct vring_tx_desc) * num_descriptors,
                        &pmc->pring_pa,
                        GFP_KERNEL);

        wil_dbg_misc(wil,
                     "%s: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
                     __func__,
                     pmc->pring_va, &pmc->pring_pa,
                     sizeof(struct vring_tx_desc),
                     num_descriptors,
                     sizeof(struct vring_tx_desc) * num_descriptors);

        if (!pmc->pring_va) {
                wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__);
                goto release_pmc_skb_list;
        }

        /* Initially, all descriptors are SW owned.
         * For Tx, Rx, and PMC the ownership bit is at the same location,
         * so we can use any of them.
         */
        for (i = 0; i < num_descriptors; i++) {
                struct vring_tx_desc *_d = &pmc->pring_va[i];
                struct vring_tx_desc dd = {}, *d = &dd;
                int j = 0;

                pmc->descriptors[i].va = dma_alloc_coherent(dev,
                        descriptor_size,
                        &pmc->descriptors[i].pa,
                        GFP_KERNEL);

                if (unlikely(!pmc->descriptors[i].va)) {
                        wil_err(wil,
                                "%s: ERROR allocating pmc descriptor %d",
                                __func__, i);
                        goto release_pmc_skbs;
                }

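                /* Pre-fill the buffer dwords: serial number in the LSW,
                 * PCM_DATA_INVALID_DW_VAL marker in the MSW, as described
                 * in the comment above wil_pmc_alloc().
                 */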
                for (j = 0; j < descriptor_size / sizeof(u32); j++) {
                        u32 *p = (u32 *)pmc->descriptors[i].va + j;
                        *p = PCM_DATA_INVALID_DW_VAL | j;
                }

                /* configure dma descriptor */
                d->dma.addr.addr_low =
                        cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
                d->dma.addr.addr_high =
                        cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
                d->dma.status = 0; /* 0 = HW_OWNED */
                d->dma.length = cpu_to_le16(descriptor_size);
                d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
                *_d = *d;
        }

        wil_dbg_misc(wil, "%s: allocated successfully\n", __func__);

        pmc_cmd.op = WMI_PMC_ALLOCATE;
        pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
        pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);

        wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__);
        pmc->last_cmd_status = wmi_send(wil,
                                        WMI_PMC_CMDID,
                                        &pmc_cmd,
                                        sizeof(pmc_cmd));
        if (pmc->last_cmd_status) {
                wil_err(wil,
                        "%s: WMI_PMC_CMD with ALLOCATE op failed with status %d",
                        __func__, pmc->last_cmd_status);
                goto release_pmc_skbs;
        }

        mutex_unlock(&pmc->lock);

        return;

release_pmc_skbs:
        wil_err(wil, "%s: exit on error: Releasing skbs...\n", __func__);
        for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
                dma_free_coherent(dev,
                                  descriptor_size,
                                  pmc->descriptors[i].va,
                                  pmc->descriptors[i].pa);

                pmc->descriptors[i].va = NULL;
        }
        wil_err(wil, "%s: exit on error: Releasing pring...\n", __func__);

        dma_free_coherent(dev,
                          sizeof(struct vring_tx_desc) * num_descriptors,
                          pmc->pring_va,
                          pmc->pring_pa);

        pmc->pring_va = NULL;

release_pmc_skb_list:
        wil_err(wil, "%s: exit on error: Releasing descriptors info list...\n",
                __func__);
        kfree(pmc->descriptors);
        pmc->descriptors = NULL;

no_release_err:
        pmc->last_cmd_status = -ENOMEM;
        mutex_unlock(&pmc->lock);
}

/**
 * Traverse the p-ring and release all buffers.
 * At the end, release the p-ring memory.
 */
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
{
        struct pmc_ctx *pmc = &wil->pmc;
        struct device *dev = wil_to_dev(wil);
        struct wmi_pmc_cmd pmc_cmd = {0};

        mutex_lock(&pmc->lock);

        pmc->last_cmd_status = 0;

        if (!wil_is_pmc_allocated(pmc)) {
                wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n",
                             __func__);
                pmc->last_cmd_status = -EPERM;
                mutex_unlock(&pmc->lock);
                return;
        }

        if (send_pmc_cmd) {
                wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n",
                             __func__);
                pmc_cmd.op = WMI_PMC_RELEASE;
                pmc->last_cmd_status =
                                wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
                                         sizeof(pmc_cmd));
                if (pmc->last_cmd_status) {
                        wil_err(wil,
                                "%s WMI_PMC_CMD with RELEASE op failed, status %d",
                                __func__, pmc->last_cmd_status);
                        /* There's nothing we can do with this error.
                         * Normally, it should never occur.
                         * Continue freeing all memory allocated for pmc.
                         */
                }
        }

        if (pmc->pring_va) {
                size_t buf_size = sizeof(struct vring_tx_desc) *
                                  pmc->num_descriptors;

                wil_dbg_misc(wil, "%s: free pring va %p\n",
                             __func__, pmc->pring_va);
                dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);

                pmc->pring_va = NULL;
        } else {
                pmc->last_cmd_status = -ENOENT;
        }

        if (pmc->descriptors) {
                int i;

                for (i = 0;
                     i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
                        dma_free_coherent(dev,
                                          pmc->descriptor_size,
                                          pmc->descriptors[i].va,
                                          pmc->descriptors[i].pa);
                        pmc->descriptors[i].va = NULL;
                }
                wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n",
                             __func__, i, pmc->num_descriptors);
                wil_dbg_misc(wil,
                             "%s: free pmc descriptors info list %p\n",
                             __func__, pmc->descriptors);
                kfree(pmc->descriptors);
                pmc->descriptors = NULL;
        } else {
                pmc->last_cmd_status = -ENOENT;
        }

        mutex_unlock(&pmc->lock);
}

/**
 * Status of the last operation requested via debugfs: alloc/free/read.
 * Returns 0 on success or a negative errno.
 */
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
{
        wil_dbg_misc(wil, "%s: status %d\n", __func__,
                     wil->pmc.last_cmd_status);

        return wil->pmc.last_cmd_status;
}

/**
 * Read from the requested position up to the end of the current descriptor.
 * The descriptor size is the one configured in the alloc request.
 */
ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
                     loff_t *f_pos)
{
        struct wil6210_priv *wil = filp->private_data;
        struct pmc_ctx *pmc = &wil->pmc;
        ssize_t retval = 0;
        unsigned long long idx;
        loff_t offset;
        size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;

        mutex_lock(&pmc->lock);

        if (!wil_is_pmc_allocated(pmc)) {
                wil_err(wil, "%s: error, pmc is not allocated!\n", __func__);
                pmc->last_cmd_status = -EPERM;
                mutex_unlock(&pmc->lock);
                return -EPERM;
        }

        wil_dbg_misc(wil,
                     "%s: size %u, pos %lld\n",
                     __func__, (unsigned)count, *f_pos);

        pmc->last_cmd_status = 0;

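        /* Convert the file position into a descriptor index and an offset
         * within that descriptor.
         */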
        idx = *f_pos;
        do_div(idx, pmc->descriptor_size);
        offset = *f_pos - (idx * pmc->descriptor_size);

        if (*f_pos >= pmc_size) {
                wil_dbg_misc(wil, "%s: reached end of pmc buf: %lld >= %u\n",
                             __func__, *f_pos, (unsigned)pmc_size);
                pmc->last_cmd_status = -ERANGE;
                goto out;
        }

        wil_dbg_misc(wil,
                     "%s: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
                     __func__, *f_pos, idx, offset, count);

        /* if no errors, return the copied byte count */
        retval = simple_read_from_buffer(buf,
                                         count,
                                         &offset,
                                         pmc->descriptors[idx].va,
                                         pmc->descriptor_size);
        *f_pos += retval;
out:
        mutex_unlock(&pmc->lock);

        return retval;
}

loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
{
        loff_t newpos;
        struct wil6210_priv *wil = filp->private_data;
        struct pmc_ctx *pmc = &wil->pmc;
        size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;

        switch (whence) {
        case SEEK_SET:
                newpos = off;
                break;

        case SEEK_CUR:
                newpos = filp->f_pos + off;
                break;

        case SEEK_END:
                newpos = pmc_size;
                break;

        default: /* unsupported whence */
                return -EINVAL;
        }

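        /* Negative positions are invalid; positions past the end of the pmc
         * buffer are clamped to pmc_size.
         */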
        if (newpos < 0)
                return -EINVAL;
        if (newpos > pmc_size)
                newpos = pmc_size;

        filp->f_pos = newpos;

        return newpos;
}
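
/*
 * Illustrative sketch, not part of the original file: wil_pmc_read() and
 * wil_pmc_llseek() are written as debugfs file hooks (filp->private_data is
 * expected to hold the struct wil6210_priv pointer, which is what
 * simple_open() provides when that pointer is passed as the data argument
 * of debugfs_create_file()). Assuming that wiring, the hook-up would look
 * roughly like the following; the struct and file names here are
 * hypothetical, not the driver's actual debugfs registration.
 */
static const struct file_operations pmc_data_fops_sketch = {
        .open   = simple_open,
        .read   = wil_pmc_read,
        .llseek = wil_pmc_llseek,
};

/* Registered from the driver's debugfs setup, e.g.:
 * debugfs_create_file("pmcdata", 0444, dbg_dir, wil, &pmc_data_fops_sketch);
 */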