/*
 * Multicore Navigator driver for TI Keystone 2 devices.
 *
 * (C) Copyright 2012-2014
 *     Texas Instruments Incorporated, <www.ti.com>
 *
 * SPDX-License-Identifier:     GPL-2.0+
 */
#include <common.h>
#include <asm/io.h>
#include <asm/arch/keystone_nav.h>

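/*
 * Typical call sequence (a sketch, not mandated by this file):
 *
 *	qm_init();			<- bring up the Queue Manager
 *	netcp_init(&rx_bufs);		<- attach rx buffers, start DMA
 *	netcp_send(pkt, len, swinfo2);	<- queue a packet for transmit
 *	hd = netcp_recv(&pkt, &len);	<- poll for a received packet
 *	if (hd)
 *		netcp_release_rxhd(hd);	<- recycle the rx descriptor
 *	netcp_close();
 *	qm_close();
 */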
static int soc_type =
#ifdef CONFIG_SOC_K2HK
        k2hk;
#endif

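/* Queue Manager register map and free-descriptor pool queue for K2HK */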
struct qm_config k2hk_qm_memmap = {
        .stat_cfg       = 0x02a40000,
        .queue          = (struct qm_reg_queue *)0x02a80000,
        .mngr_vbusm     = 0x23a80000,
        .i_lram         = 0x00100000,
        .proxy          = (struct qm_reg_queue *)0x02ac0000,
        .status_ram     = 0x02a06000,
        .mngr_cfg       = (struct qm_cfg_reg *)0x02a02000,
        .intd_cfg       = 0x02a0c000,
        .desc_mem       = (struct descr_mem_setup_reg *)0x02a03000,
        .region_num     = 64,
        .pdsp_cmd       = 0x02a20000,
        .pdsp_ctl       = 0x02a0f000,
        .pdsp_iram      = 0x02a10000,
        .qpool_num      = 4000,
};

/*
 * We use only one type of descriptor: host packet descriptors.
 * Memory for them is statically allocated here.
 */
struct qm_host_desc desc_pool[HDESC_NUM] __aligned(sizeof(struct qm_host_desc));

static struct qm_config *qm_cfg;

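/*
 * Convert a descriptor count to the 4-bit region size code used by the
 * descriptor memory setup register: code n covers up to 32 * 2^n
 * descriptors (0 -> 32, 1 -> 64, ..., 15 is the cap).
 */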
inline int num_of_desc_to_reg(int num_descr)
{
        int j, num;

        for (j = 0, num = 32; j < 15; j++, num *= 2) {
                if (num_descr <= num)
                        return j;
        }

        return 15;
}

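/*
 * Program the Queue Manager: point linking RAM 0 at internal RAM and size
 * it for HDESC_NUM descriptors, describe descriptor memory region 0 (base
 * address, start index, descriptor size and count), then seed the free
 * pool queue with every descriptor from desc_pool.
 */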
static int _qm_init(struct qm_config *cfg)
{
        u32     j;

        if (cfg == NULL)
                return QM_ERR;

        qm_cfg = cfg;

        qm_cfg->mngr_cfg->link_ram_base0        = qm_cfg->i_lram;
        qm_cfg->mngr_cfg->link_ram_size0        = HDESC_NUM * 8;
        qm_cfg->mngr_cfg->link_ram_base1        = 0;
        qm_cfg->mngr_cfg->link_ram_size1        = 0;
        qm_cfg->mngr_cfg->link_ram_base2        = 0;

        qm_cfg->desc_mem[0].base_addr = (u32)desc_pool;
        qm_cfg->desc_mem[0].start_idx = 0;
        qm_cfg->desc_mem[0].desc_reg_size =
                (((sizeof(struct qm_host_desc) >> 4) - 1) << 16) |
                num_of_desc_to_reg(HDESC_NUM);

        memset(desc_pool, 0, sizeof(desc_pool));
        for (j = 0; j < HDESC_NUM; j++)
                qm_push(&desc_pool[j], qm_cfg->qpool_num);

        return QM_OK;
}

int qm_init(void)
{
        switch (soc_type) {
        case k2hk:
                return _qm_init(&k2hk_qm_memmap);
        }

        return QM_ERR;
}

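/*
 * Undo _qm_init(): drain the free pool queue, then clear the linking RAM
 * registers and every descriptor memory region.
 */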
void qm_close(void)
{
        u32     j;

        if (qm_cfg == NULL)
                return;

        queue_close(qm_cfg->qpool_num);

        qm_cfg->mngr_cfg->link_ram_base0        = 0;
        qm_cfg->mngr_cfg->link_ram_size0        = 0;
        qm_cfg->mngr_cfg->link_ram_base1        = 0;
        qm_cfg->mngr_cfg->link_ram_size1        = 0;
        qm_cfg->mngr_cfg->link_ram_base2        = 0;

        for (j = 0; j < qm_cfg->region_num; j++) {
                qm_cfg->desc_mem[j].base_addr = 0;
                qm_cfg->desc_mem[j].start_idx = 0;
                qm_cfg->desc_mem[j].desc_reg_size = 0;
        }

        qm_cfg = NULL;
}

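/*
 * Push a descriptor onto a queue: convert it to bus byte order, then
 * write its address, with the descriptor size code in the low four bits,
 * to the queue's push register.
 */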
void qm_push(struct qm_host_desc *hd, u32 qnum)
{
        u32 regd;

        if (!qm_cfg)
                return;

        cpu_to_bus((u32 *)hd, sizeof(struct qm_host_desc)/4);
        regd = (u32)hd | ((sizeof(struct qm_host_desc) >> 4) - 1);
        writel(regd, &qm_cfg->queue[qnum].ptr_size_thresh);
}

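/* Attach a data buffer to a host descriptor and push it onto a queue */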
void qm_buff_push(struct qm_host_desc *hd, u32 qnum,
                    void *buff_ptr, u32 buff_len)
{
        hd->orig_buff_len = buff_len;
        hd->buff_len = buff_len;
        hd->orig_buff_ptr = (u32)buff_ptr;
        hd->buff_ptr = (u32)buff_ptr;
        qm_push(hd, qnum);
}

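/*
 * Pop a descriptor from a queue: the pop register returns the descriptor
 * address with the size code in the low four bits, which is masked off
 * before converting the descriptor back to CPU byte order.
 */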
struct qm_host_desc *qm_pop(u32 qnum)
{
        u32 uhd;

        if (!qm_cfg)
                return NULL;

        uhd = readl(&qm_cfg->queue[qnum].ptr_size_thresh) & ~0xf;
        if (uhd)
                cpu_to_bus((u32 *)uhd, sizeof(struct qm_host_desc)/4);

        return (struct qm_host_desc *)uhd;
}

struct qm_host_desc *qm_pop_from_free_pool(void)
{
        if (!qm_cfg)
                return NULL;

        return qm_pop(qm_cfg->qpool_num);
}

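/* Empty a queue by popping descriptors until none remain */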
void queue_close(u32 qnum)
{
        struct qm_host_desc *hd;

        while ((hd = qm_pop(qnum)))
                ;
}

/*
 * DMA API
 */

struct pktdma_cfg k2hk_netcp_pktdma = {
        .global         = (struct global_ctl_regs *)0x02004000,
        .tx_ch          = (struct tx_chan_regs *)0x02004400,
        .tx_ch_num      = 9,
        .rx_ch          = (struct rx_chan_regs *)0x02004800,
        .rx_ch_num      = 26,
        .tx_sched       = (u32 *)0x02004c00,
        .rx_flows       = (struct rx_flow_regs *)0x02005000,
        .rx_flow_num    = 32,
        .rx_free_q      = 4001,
        .rx_rcv_q       = 4002,
        .tx_snd_q       = 648,
};

struct pktdma_cfg *netcp;

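/*
 * Tear down all rx channels: request teardown on each enabled channel,
 * poll until it disables (or the timeout expires), then clear every rx
 * flow register.
 */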
static int netcp_rx_disable(void)
{
        u32 j, v, k;

        for (j = 0; j < netcp->rx_ch_num; j++) {
                v = readl(&netcp->rx_ch[j].cfg_a);
                if (!(v & CPDMA_CHAN_A_ENABLE))
                        continue;

                writel(v | CPDMA_CHAN_A_TDOWN, &netcp->rx_ch[j].cfg_a);
                for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
                        udelay(100);
                        v = readl(&netcp->rx_ch[j].cfg_a);
                        /* stop polling once the channel has gone down */
                        if (!(v & CPDMA_CHAN_A_ENABLE))
                                break;
                }
                /* TODO: report a teardown error if TDOWN_TIMEOUT_COUNT
                 * is reached */
        }

        /* Clear all of the flow registers */
        for (j = 0; j < netcp->rx_flow_num; j++) {
                writel(0, &netcp->rx_flows[j].control);
                writel(0, &netcp->rx_flows[j].tags);
                writel(0, &netcp->rx_flows[j].tag_sel);
                writel(0, &netcp->rx_flows[j].fdq_sel[0]);
                writel(0, &netcp->rx_flows[j].fdq_sel[1]);
                writel(0, &netcp->rx_flows[j].thresh[0]);
                writel(0, &netcp->rx_flows[j].thresh[1]);
                writel(0, &netcp->rx_flows[j].thresh[2]);
        }

        return QM_OK;
}

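/* Tear down all tx channels, same procedure as for rx */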
static int netcp_tx_disable(void)
{
        u32 j, v, k;

        for (j = 0; j < netcp->tx_ch_num; j++) {
                v = readl(&netcp->tx_ch[j].cfg_a);
                if (!(v & CPDMA_CHAN_A_ENABLE))
                        continue;

                writel(v | CPDMA_CHAN_A_TDOWN, &netcp->tx_ch[j].cfg_a);
                for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
                        udelay(100);
                        v = readl(&netcp->tx_ch[j].cfg_a);
                        /* stop polling once the channel has gone down */
                        if (!(v & CPDMA_CHAN_A_ENABLE))
                                break;
                }
                /* TODO: report a teardown error if TDOWN_TIMEOUT_COUNT
                 * is reached */
        }

        return QM_OK;
}

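/*
 * Bring up the packet DMA: fill the rx free queue with one descriptor per
 * receive buffer, program the chosen rx flow to deliver packets to the rx
 * receive queue, and enable every rx and tx channel.
 */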
static int _netcp_init(struct pktdma_cfg *netcp_cfg,
                       struct rx_buff_desc *rx_buffers)
{
        u32 j, v;
        struct qm_host_desc *hd;
        u8 *rx_ptr;

        if (netcp_cfg == NULL || rx_buffers == NULL ||
            rx_buffers->buff_ptr == NULL || qm_cfg == NULL)
                return QM_ERR;

        netcp = netcp_cfg;
        netcp->rx_flow = rx_buffers->rx_flow;

        /* init rx queue */
        rx_ptr = rx_buffers->buff_ptr;

        for (j = 0; j < rx_buffers->num_buffs; j++) {
                hd = qm_pop(qm_cfg->qpool_num);
                if (hd == NULL)
                        return QM_ERR;

                qm_buff_push(hd, netcp->rx_free_q,
                             rx_ptr, rx_buffers->buff_len);

                rx_ptr += rx_buffers->buff_len;
        }

        netcp_rx_disable();

        /* configure rx channels */
        v = CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, netcp->rx_rcv_q);
        writel(v, &netcp->rx_flows[netcp->rx_flow].control);
        writel(0, &netcp->rx_flows[netcp->rx_flow].tags);
        writel(0, &netcp->rx_flows[netcp->rx_flow].tag_sel);

        v = CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, netcp->rx_free_q, 0,
                                         netcp->rx_free_q);

        writel(v, &netcp->rx_flows[netcp->rx_flow].fdq_sel[0]);
        writel(v, &netcp->rx_flows[netcp->rx_flow].fdq_sel[1]);
        writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[0]);
        writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[1]);
        writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[2]);

        for (j = 0; j < netcp->rx_ch_num; j++)
                writel(CPDMA_CHAN_A_ENABLE, &netcp->rx_ch[j].cfg_a);

        /* configure tx channels */
        /* Disable loopback in the tx direction */
        writel(0, &netcp->global->emulation_control);

/* TODO: make this dependent on a SoC type variable */
#ifdef CONFIG_SOC_K2HK
        /* Set QM base address, only for K2x devices */
        writel(0x23a80000, &netcp->global->qm_base_addr[0]);
#endif

        /* Enable all channels. The current state isn't important */
        for (j = 0; j < netcp->tx_ch_num; j++)  {
                writel(0, &netcp->tx_ch[j].cfg_b);
                writel(CPDMA_CHAN_A_ENABLE, &netcp->tx_ch[j].cfg_a);
        }

        return QM_OK;
}

int netcp_init(struct rx_buff_desc *rx_buffers)
{
        switch (soc_type) {
        case k2hk:
                /* propagate errors instead of discarding them */
                return _netcp_init(&k2hk_netcp_pktdma, rx_buffers);
        }
        return QM_ERR;
}

int netcp_close(void)
{
        if (!netcp)
                return QM_ERR;

        netcp_tx_disable();
        netcp_rx_disable();

        queue_close(netcp->rx_free_q);
        queue_close(netcp->rx_rcv_q);
        queue_close(netcp->tx_snd_q);

        return QM_OK;
}

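/*
 * Transmit a packet: take a descriptor from the free pool, record the
 * packet length and swinfo word, set the return queue (packet_info) to
 * the free pool so the descriptor is recycled after transmission, and
 * push it with the buffer onto the tx send queue.
 */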
int netcp_send(u32 *pkt, int num_bytes, u32 swinfo2)
{
        struct qm_host_desc *hd;

        hd = qm_pop(qm_cfg->qpool_num);
        if (hd == NULL)
                return QM_ERR;

        hd->desc_info   = num_bytes;
        hd->swinfo[2]   = swinfo2;
        hd->packet_info = qm_cfg->qpool_num;

        qm_buff_push(hd, netcp->tx_snd_q, pkt, num_bytes);

        return QM_OK;
}

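/*
 * Poll the rx receive queue. On success, return the descriptor (so the
 * caller can release it later) and hand back the buffer pointer and the
 * packet length taken from the low 22 bits of desc_info.
 */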
void *netcp_recv(u32 **pkt, int *num_bytes)
{
        struct qm_host_desc *hd;

        hd = qm_pop(netcp->rx_rcv_q);
        if (!hd)
                return NULL;

        *pkt = (u32 *)hd->buff_ptr;
        *num_bytes = hd->desc_info & 0x3fffff;

        return hd;
}

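/*
 * Return an rx descriptor to the free queue, restoring the original
 * buffer pointer and length that the DMA may have modified.
 */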
void netcp_release_rxhd(void *hd)
{
        struct qm_host_desc *_hd = (struct qm_host_desc *)hd;

        _hd->buff_len = _hd->orig_buff_len;
        _hd->buff_ptr = _hd->orig_buff_ptr;

        qm_push(_hd, netcp->rx_free_q);
}