1 /******************************************************************************
2          iphase.c: Device driver for Interphase ATM PCI adapter cards 
3                     Author: Peter Wang  <pwang@iphase.com>            
4                    Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5                    Interphase Corporation  <www.iphase.com>           
6                                Version: 1.0                           
7 *******************************************************************************
8       
9       This software may be used and distributed according to the terms
10       of the GNU General Public License (GPL), incorporated herein by reference.
11       Drivers based on this skeleton fall under the GPL and must retain
12       the authorship (implicit copyright) notice.
13
14       This program is distributed in the hope that it will be useful, but
15       WITHOUT ANY WARRANTY; without even the implied warranty of
16       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17       General Public License for more details.
18       
19       Modified from an incomplete driver for Interphase 5575 1KVC 1M card which 
20       was originally written by Monalisa Agrawal at UNH. Now this driver 
21 supports a variety of variants of the Interphase ATM PCI (i)Chip adapter 
22       card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM) 
23       in terms of PHY type, the size of control memory and the size of 
24 packet memory. The following is the change log and history:
25      
26           Bugfix Mona's UBR driver.
27           Modify the basic memory allocation and dma logic.
28           Port the driver to the latest kernel from 2.0.46.
29           Complete the ABR logic of the driver, and add the ABR work-
30               around for the hardware anomalies.
31           Add the CBR support.
32           Add the flow control logic to the driver to allow rate-limited VCs.
33           Add 4K VC support to the board with 512K control memory.
34           Add the support of all the variants of the Interphase ATM PCI 
35           (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36           (25M UTP25) and x531 (DS3 and E3).
37           Add SMP support.
38
39       Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41 *******************************************************************************/
42
43 #include <linux/module.h>  
44 #include <linux/kernel.h>  
45 #include <linux/mm.h>  
46 #include <linux/pci.h>  
47 #include <linux/errno.h>  
48 #include <linux/atm.h>  
49 #include <linux/atmdev.h>  
50 #include <linux/sonet.h>  
51 #include <linux/skbuff.h>  
52 #include <linux/time.h>  
53 #include <linux/delay.h>  
54 #include <linux/uio.h>  
55 #include <linux/init.h>  
56 #include <linux/interrupt.h>
57 #include <linux/wait.h>
58 #include <linux/slab.h>
59 #include <asm/io.h>  
60 #include <linux/atomic.h>
61 #include <asm/uaccess.h>  
62 #include <asm/string.h>  
63 #include <asm/byteorder.h>  
64 #include <linux/vmalloc.h>
65 #include <linux/jiffies.h>
66 #include "iphase.h"               
67 #include "suni.h"                 
68 #define swap_byte_order(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
69
70 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
71
72 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
73 static void desc_dbg(IADEV *iadev);
74
75 static IADEV *ia_dev[8];
76 static struct atm_dev *_ia_dev[8];
77 static int iadev_count;
78 static void ia_led_timer(unsigned long arg);
79 static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
80 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
81 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
82 static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
83             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0; 
84
85 module_param(IA_TX_BUF, int, 0);
86 module_param(IA_TX_BUF_SZ, int, 0);
87 module_param(IA_RX_BUF, int, 0);
88 module_param(IA_RX_BUF_SZ, int, 0);
89 module_param(IADebugFlag, uint, 0644);
90
91 MODULE_LICENSE("GPL");
92
93 /**************************** IA_LIB **********************************/
94
95 static void ia_init_rtn_q (IARTN_Q *que) 
96 {
97    que->next = NULL; 
98    que->tail = NULL; 
99 }
100
101 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data) 
102 {
103    data->next = NULL;
104    if (que->next == NULL) 
105       que->next = que->tail = data;
106    else {
107       data->next = que->next;
108       que->next = data;
109    } 
110    return;
111 }
112
113 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
114    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
115    if (!entry) return -1;
116    entry->data = data;
117    entry->next = NULL;
118    if (que->next == NULL) 
119       que->next = que->tail = entry;
120    else {
121       que->tail->next = entry;
122       que->tail = que->tail->next;
123    }      
124    return 1;
125 }
126
127 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
128    IARTN_Q *tmpdata;
129    if (que->next == NULL)
130       return NULL;
131    tmpdata = que->next;
132    if ( que->next == que->tail)  
133       que->next = que->tail = NULL;
134    else 
135       que->next = que->next->next;
136    return tmpdata;
137 }
138
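/*
 * Walk the transmit completion queue (TCQ) from our cached write pointer up
 * to the hardware's current write pointer.  Each completed descriptor has its
 * timestamp cleared and the owning VC's outstanding-descriptor count dropped;
 * for VCs below the rate limit the entry is queued on tx_return_q so that
 * ia_tx_poll() can release the skb.
 */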
139 static void ia_hack_tcq(IADEV *dev) {
140
141   u_short               desc1;
142   u_short               tcq_wr;
143   struct ia_vcc         *iavcc_r = NULL; 
144
145   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
146   while (dev->host_tcq_wr != tcq_wr) {
147      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
148      if (!desc1) ;
149      else if (!dev->desc_tbl[desc1 -1].timestamp) {
150         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
151         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
152      }                                 
153      else if (dev->desc_tbl[desc1 -1].timestamp) {
154         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) { 
155            printk("IA: Fatal err in get_desc\n");
156            continue;
157         }
158         iavcc_r->vc_desc_cnt--;
159         dev->desc_tbl[desc1 -1].timestamp = 0;
160         IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
161                                    dev->desc_tbl[desc1 -1].txskb, desc1);)
162         if (iavcc_r->pcr < dev->rate_limit) {
163            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
164            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
165               printk("ia_hack_tcq: No memory available\n");
166         } 
167         dev->desc_tbl[desc1 -1].iavcc = NULL;
168         dev->desc_tbl[desc1 -1].txskb = NULL;
169      }
170      dev->host_tcq_wr += 2;
171      if (dev->host_tcq_wr > dev->ffL.tcq_ed) 
172         dev->host_tcq_wr = dev->ffL.tcq_st;
173   }
174 } /* ia_hack_tcq */
175
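/*
 * Return the next free transmit descriptor number (1-based) from the TCQ, or
 * 0xFFFF if none is available.  Every ~50 jiffies (or when the TCQ looks
 * empty) the descriptor table is scanned and descriptors that have been
 * outstanding longer than their VC's ltimeout are recovered.
 */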
176 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
177   u_short               desc_num, i;
178   struct sk_buff        *skb;
179   struct ia_vcc         *iavcc_r = NULL; 
180   unsigned long delta;
181   static unsigned long timer = 0;
182   int ltimeout;
183
184   ia_hack_tcq (dev);
185   if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
186      timer = jiffies; 
187      i=0;
188      while (i < dev->num_tx_desc) {
189         if (!dev->desc_tbl[i].timestamp) {
190            i++;
191            continue;
192         }
193         ltimeout = dev->desc_tbl[i].iavcc->ltimeout; 
194         delta = jiffies - dev->desc_tbl[i].timestamp;
195         if (delta >= ltimeout) {
196            IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
197            if (dev->ffL.tcq_rd == dev->ffL.tcq_st) 
198               dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
199            else 
200               dev->ffL.tcq_rd -= 2;
201            *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
202            if (!(skb = dev->desc_tbl[i].txskb) || 
203                           !(iavcc_r = dev->desc_tbl[i].iavcc))
204               printk("Fatal err, desc table vcc or skb is NULL\n");
205            else 
206               iavcc_r->vc_desc_cnt--;
207            dev->desc_tbl[i].timestamp = 0;
208            dev->desc_tbl[i].iavcc = NULL;
209            dev->desc_tbl[i].txskb = NULL;
210         }
211         i++;
212      } /* while */
213   }
214   if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
215      return 0xFFFF;
216     
217   /* Get the next available descriptor number from TCQ */
218   desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
219
220   while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
221      dev->ffL.tcq_rd += 2;
222      if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 
223         dev->ffL.tcq_rd = dev->ffL.tcq_st;
224      if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
225         return 0xFFFF; 
226      desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
227   }
228
229   /* get system time */
230   dev->desc_tbl[desc_num -1].timestamp = jiffies;
231   return desc_num;
232 }
233
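/*
 * ABR lock-up detection: every fifth call for a given ABR VC, check whether
 * its segmentation state appears stuck (same ABR state after a 10 us wait, or
 * last_cell_slot/fraction unchanged since the previous check).  If so, force
 * the VC back to the idle state and re-insert its VCI into the ABR schedule
 * table so transmission can resume.
 */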
234 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
235   u_char                foundLockUp;
236   vcstatus_t            *vcstatus;
237   u_short               *shd_tbl;
238   u_short               tempCellSlot, tempFract;
239   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
240   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
241   u_int  i;
242
243   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
244      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
245      vcstatus->cnt++;
246      foundLockUp = 0;
247      if( vcstatus->cnt == 0x05 ) {
248         abr_vc += vcc->vci;
249         eabr_vc += vcc->vci;
250         if( eabr_vc->last_desc ) {
251            if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
252               /* Wait for 10 Micro sec */
253               udelay(10);
254               if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
255                  foundLockUp = 1;
256            }
257            else {
258               tempCellSlot = abr_vc->last_cell_slot;
259               tempFract    = abr_vc->fraction;
260               if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
261                          && (tempFract == dev->testTable[vcc->vci]->fract))
262                  foundLockUp = 1;                   
263               dev->testTable[vcc->vci]->lastTime = tempCellSlot;   
264               dev->testTable[vcc->vci]->fract = tempFract; 
265            }        
266         } /* last descriptor */            
267         vcstatus->cnt = 0;      
268      } /* vcstatus->cnt */
269         
270      if (foundLockUp) {
271         IF_ABR(printk("LOCK UP found\n");) 
272         writew(0xFFFD, dev->seg_reg+MODE_REG_0);
273         /* Wait for 10 Micro sec */
274         udelay(10); 
275         abr_vc->status &= 0xFFF8;
276         abr_vc->status |= 0x0001;  /* state is idle */
277         shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;                
278         for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
279         if (i < dev->num_vc)
280            shd_tbl[i] = vcc->vci;
281         else
282            IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
283         writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
284         writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
285         writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);       
286         vcstatus->cnt = 0;
287      } /* foundLockUp */
288
289   } /* if an ABR VC */
290
291
292 }
293  
294 /*
295 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
296 **
297 **  +----+----+------------------+-------------------------------+
298 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
299 **  +----+----+------------------+-------------------------------+
300 ** 
301 **    R = reserved (written as 0)
302 **    NZ = 0 if 0 cells/sec; 1 otherwise
303 **
304 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
305 */
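/*
** Worked example (illustration only): cr = 353207 cells/sec, roughly the
** OC-3 PCR.  The highest set bit is bit 18, so the exponent is 18 and the
** mantissa is (353207 >> (18 - 9)) & 0x1ff = 177.  The encoded value is
** 0x4000 | (18 << 9) | 177 = 0x64b1, which decodes back to
** (512 + 177) << (18 - 9) = 352768 cells/sec.
*/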
306 static u16
307 cellrate_to_float(u32 cr)
308 {
309
310 #define NZ              0x4000
311 #define M_BITS          9               /* Number of bits in mantissa */
312 #define E_BITS          5               /* Number of bits in exponent */
313 #define M_MASK          0x1ff           
314 #define E_MASK          0x1f
315   u16   flot;
316   u32   tmp = cr & 0x00ffffff;
317   int   i   = 0;
318   if (cr == 0)
319      return 0;
320   while (tmp != 1) {
321      tmp >>= 1;
322      i++;
323   }
324   if (i == M_BITS)
325      flot = NZ | (i << M_BITS) | (cr & M_MASK);
326   else if (i < M_BITS)
327      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
328   else
329      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
330   return flot;
331 }
332
333 #if 0
334 /*
335 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
336 */
337 static u32
338 float_to_cellrate(u16 rate)
339 {
340   u32   exp, mantissa, cps;
341   if ((rate & NZ) == 0)
342      return 0;
343   exp = (rate >> M_BITS) & E_MASK;
344   mantissa = rate & M_MASK;
345   if (exp == 0)
346      return 1;
347   cps = (1 << M_BITS) | mantissa;
348   if (exp == M_BITS)
349      cps = cps;
350   else if (exp > M_BITS)
351      cps <<= (exp - M_BITS);
352   else
353      cps >>= (M_BITS - exp);
354   return cps;
355 }
356 #endif 
357
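/*
 * Fill *srv_p with the default ABR service parameters used when the caller
 * supplies none: PCR = line rate, MCR = 0, plus fixed ICR, TBE, FRTT, RIF,
 * RDF, Nrm, Trm, CDF and ADTF values.
 */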
358 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
359   srv_p->class_type = ATM_ABR;
360   srv_p->pcr        = dev->LineRate;
361   srv_p->mcr        = 0;
362   srv_p->icr        = 0x055cb7;
363   srv_p->tbe        = 0xffffff;
364   srv_p->frtt       = 0x3a;
365   srv_p->rif        = 0xf;
366   srv_p->rdf        = 0xb;
367   srv_p->nrm        = 0x4;
368   srv_p->trm        = 0x7;
369   srv_p->cdf        = 0x3;
370   srv_p->adtf       = 50;
371 }
372
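/*
 * Program an ABR VC into the SAR.  flag == 1 initializes the transmit side
 * (FFRED main VC table entry: Nrm/Trm/CRM, PCR, ICR, MCR, ADTF/CDF);
 * flag == 0 initializes the receive side (RFRED reassembly table entry and
 * ABR VC table entry with the RDF and additive increase rate).
 */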
373 static int
374 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p, 
375                                                 struct atm_vcc *vcc, u8 flag)
376 {
377   f_vc_abr_entry  *f_abr_vc;
378   r_vc_abr_entry  *r_abr_vc;
379   u32           icr;
380   u8            trm, nrm, crm;
381   u16           adtf, air, *ptr16;      
382   f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
383   f_abr_vc += vcc->vci;       
384   switch (flag) {
385      case 1: /* FFRED initialization */
386 #if 0  /* sanity check */
387        if (srv_p->pcr == 0)
388           return INVALID_PCR;
389        if (srv_p->pcr > dev->LineRate)
390           srv_p->pcr = dev->LineRate;
391        if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
392           return MCR_UNAVAILABLE;
393        if (srv_p->mcr > srv_p->pcr)
394           return INVALID_MCR;
395        if (!(srv_p->icr))
396           srv_p->icr = srv_p->pcr;
397        if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
398           return INVALID_ICR;
399        if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
400           return INVALID_TBE;
401        if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
402           return INVALID_FRTT;
403        if (srv_p->nrm > MAX_NRM)
404           return INVALID_NRM;
405        if (srv_p->trm > MAX_TRM)
406           return INVALID_TRM;
407        if (srv_p->adtf > MAX_ADTF)
408           return INVALID_ADTF;
409        else if (srv_p->adtf == 0)
410           srv_p->adtf = 1;
411        if (srv_p->cdf > MAX_CDF)
412           return INVALID_CDF;
413        if (srv_p->rif > MAX_RIF)
414           return INVALID_RIF;
415        if (srv_p->rdf > MAX_RDF)
416           return INVALID_RDF;
417 #endif
418        memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
419        f_abr_vc->f_vc_type = ABR;
420        nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
421                                   /* i.e 2**n = 2 << (n-1) */
422        f_abr_vc->f_nrm = nrm << 8 | nrm;
423        trm = 100000/(2 << (16 - srv_p->trm));
424        if ( trm == 0) trm = 1;
425        f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
426        crm = srv_p->tbe / nrm;
427        if (crm == 0) crm = 1;
428        f_abr_vc->f_crm = crm & 0xff;
429        f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
430        icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
431                                 ((srv_p->tbe/srv_p->frtt)*1000000) :
432                                 (1000000/(srv_p->frtt/srv_p->tbe)));
433        f_abr_vc->f_icr = cellrate_to_float(icr);
434        adtf = (10000 * srv_p->adtf)/8192;
435        if (adtf == 0) adtf = 1; 
436        f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
437        f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
438        f_abr_vc->f_acr = f_abr_vc->f_icr;
439        f_abr_vc->f_status = 0x0042;
440        break;
441     case 0: /* RFRED initialization */  
442        ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize); 
443        *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
444        r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
445        r_abr_vc += vcc->vci;
446        r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
447        air = srv_p->pcr << (15 - srv_p->rif);
448        if (air == 0) air = 1;
449        r_abr_vc->r_air = cellrate_to_float(air);
450        dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
451        dev->sum_mcr        += srv_p->mcr;
452        dev->n_abr++;
453        break;
454     default:
455        break;
456   }
457   return        0;
458 }
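/*
 * Reserve CBR schedule-table entries for a VC.  The requested PCR is rounded
 * to a whole number of Granularity-sized entries, and those entries are
 * spread as evenly as possible through the table, starting near CbrEntryPt
 * and probing alternately below and above the ideal slot when it is already
 * occupied.  CBR scheduling is enabled in STPARMS when the first CBR VC is
 * set up.
 */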
459 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
460    u32 rateLow=0, rateHigh, rate;
461    int entries;
462    struct ia_vcc *ia_vcc;
463
464    int   idealSlot =0, testSlot, toBeAssigned, inc;
465    u32   spacing;
466    u16  *SchedTbl, *TstSchedTbl;
467    u16  cbrVC, vcIndex;
468    u32   fracSlot    = 0;
469    u32   sp_mod      = 0;
470    u32   sp_mod2     = 0;
471
472    /* IpAdjustTrafficParams */
473    if (vcc->qos.txtp.max_pcr <= 0) {
474       IF_ERR(printk("PCR for CBR not defined\n");)
475       return -1;
476    }
477    rate = vcc->qos.txtp.max_pcr;
478    entries = rate / dev->Granularity;
479    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
480                                 entries, rate, dev->Granularity);)
481    if (entries < 1)
482       IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");) 
483    rateLow  =  entries * dev->Granularity;
484    rateHigh = (entries + 1) * dev->Granularity;
485    if (3*(rate - rateLow) > (rateHigh - rate))
486       entries++;
487    if (entries > dev->CbrRemEntries) {
488       IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
489       IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
490                                        entries, dev->CbrRemEntries);)
491       return -EBUSY;
492    }   
493
494    ia_vcc = INPH_IA_VCC(vcc);
495    ia_vcc->NumCbrEntry = entries; 
496    dev->sum_mcr += entries * dev->Granularity; 
497    /* IaFFrednInsertCbrSched */
498    // Starting at an arbitrary location, place the entries into the table
499    // as smoothly as possible
500    cbrVC   = 0;
501    spacing = dev->CbrTotEntries / entries;
502    sp_mod  = dev->CbrTotEntries % entries; // get modulo
503    toBeAssigned = entries;
504    fracSlot = 0;
505    vcIndex  = vcc->vci;
506    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
507    while (toBeAssigned)
508    {
509       // If this is the first time, start the table loading for this connection
510       // as close to entryPoint as possible.
511       if (toBeAssigned == entries)
512       {
513          idealSlot = dev->CbrEntryPt;
514          dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
515          if (dev->CbrEntryPt >= dev->CbrTotEntries) 
516             dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
517       } else {
518          idealSlot += (u32)(spacing + fracSlot); // Point to the next location
519          // in the table that would be  smoothest
520          fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
521          sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
522       }
523       if (idealSlot >= (int)dev->CbrTotEntries) 
524          idealSlot -= dev->CbrTotEntries;  
525       // Continuously check around this ideal value until a null
526       // location is encountered.
527       SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize); 
528       inc = 0;
529       testSlot = idealSlot;
530       TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
531       IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
532                                 testSlot, TstSchedTbl,toBeAssigned);)
533       memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
534       while (cbrVC)  // If another VC at this location, we have to keep looking
535       {
536           inc++;
537           testSlot = idealSlot - inc;
538           if (testSlot < 0) { // Wrap if necessary
539              testSlot += dev->CbrTotEntries;
540              IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
541                                                        SchedTbl,testSlot);)
542           }
543           TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
544           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 
545           if (!cbrVC)
546              break;
547           testSlot = idealSlot + inc;
548           if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
549              testSlot -= dev->CbrTotEntries;
550              IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
551              IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n", 
552                                             testSlot, toBeAssigned);)
553           } 
554           // set table index and read in value
555           TstSchedTbl = (u16*)(SchedTbl + testSlot);
556           IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
557                           TstSchedTbl,cbrVC,inc);)
558           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
559        } /* while */
560        // Move this VCI number into this location of the CBR Sched table.
561        memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
562        dev->CbrRemEntries--;
563        toBeAssigned--;
564    } /* while */ 
565
566    /* IaFFrednCbrEnable */
567    dev->NumEnabledCBR++;
568    if (dev->NumEnabledCBR == 1) {
569        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
570        IF_CBR(printk("CBR is enabled\n");)
571    }
572    return 0;
573 }
574 static void ia_cbrVc_close (struct atm_vcc *vcc) {
575    IADEV *iadev;
576    u16 *SchedTbl, NullVci = 0;
577    u32 i, NumFound;
578
579    iadev = INPH_IA_DEV(vcc->dev);
580    iadev->NumEnabledCBR--;
581    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
582    if (iadev->NumEnabledCBR == 0) {
583       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
584       IF_CBR (printk("CBR support disabled\n");)
585    }
586    NumFound = 0;
587    for (i=0; i < iadev->CbrTotEntries; i++)
588    {
589       if (*SchedTbl == vcc->vci) {
590          iadev->CbrRemEntries++;
591          *SchedTbl = NullVci;
592          IF_CBR(NumFound++;)
593       }
594       SchedTbl++;   
595    } 
596    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
597 }
598
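/* Number of free transmit descriptors currently available in the TCQ. */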
599 static int ia_avail_descs(IADEV *iadev) {
600    int tmp = 0;
601    ia_hack_tcq(iadev);
602    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
603       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
604    else
605       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
606                    iadev->ffL.tcq_st) / 2;
607    return tmp;
608 }    
609
610 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
611
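/*
 * Drain the transmit backlog: while free descriptors are available, dequeue
 * skbs from tx_backlog and hand them to ia_pkt_tx(); an skb that cannot be
 * sent is put back at the head of the backlog.
 */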
612 static int ia_que_tx (IADEV *iadev) { 
613    struct sk_buff *skb;
614    int num_desc;
615    struct atm_vcc *vcc;
616    num_desc = ia_avail_descs(iadev);
617
618    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
619       if (!(vcc = ATM_SKB(skb)->vcc)) {
620          dev_kfree_skb_any(skb);
621          printk("ia_que_tx: Null vcc\n");
622          break;
623       }
624       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
625          dev_kfree_skb_any(skb);
626          printk("Free the SKB on closed vci %d \n", vcc->vci);
627          break;
628       }
629       if (ia_pkt_tx (vcc, skb)) {
630          skb_queue_head(&iadev->tx_backlog, skb);
631       }
632       num_desc--;
633    }
634    return 0;
635 }
636
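/*
 * Reap completed transmissions: run ia_hack_tcq(), then for each entry on
 * tx_return_q pop skbs from the VC's txing_skb list up to the completed one,
 * returning each via vcc->pop() (or freeing it), and finally kick
 * ia_que_tx() to push out any backlogged packets.
 */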
637 static void ia_tx_poll (IADEV *iadev) {
638    struct atm_vcc *vcc = NULL;
639    struct sk_buff *skb = NULL, *skb1 = NULL;
640    struct ia_vcc *iavcc;
641    IARTN_Q *  rtne;
642
643    ia_hack_tcq(iadev);
644    while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
645        skb = rtne->data.txskb;
646        if (!skb) {
647            printk("ia_tx_poll: skb is null\n");
648            goto out;
649        }
650        vcc = ATM_SKB(skb)->vcc;
651        if (!vcc) {
652            printk("ia_tx_poll: vcc is null\n");
653            dev_kfree_skb_any(skb);
654            goto out;
655        }
656
657        iavcc = INPH_IA_VCC(vcc);
658        if (!iavcc) {
659            printk("ia_tx_poll: iavcc is null\n");
660            dev_kfree_skb_any(skb);
661            goto out;
662        }
663
664        skb1 = skb_dequeue(&iavcc->txing_skb);
665        while (skb1 && (skb1 != skb)) {
666           if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
667              printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
668           }
669           IF_ERR(printk("Release the SKB not match\n");)
670           if ((vcc->pop) && (skb1->len != 0))
671           {
672              vcc->pop(vcc, skb1);
673              IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
674                                                           (long)skb1);)
675           }
676           else 
677              dev_kfree_skb_any(skb1);
678           skb1 = skb_dequeue(&iavcc->txing_skb);
679        }                                                        
680        if (!skb1) {
681           IF_EVENT(printk("IA: Vci %d - skb not found, requeued\n",vcc->vci);)
682           ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
683           break;
684        }
685        if ((vcc->pop) && (skb->len != 0))
686        {
687           vcc->pop(vcc, skb);
688           IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
689        }
690        else 
691           dev_kfree_skb_any(skb);
692        kfree(rtne);
693     }
694     ia_que_tx(iadev);
695 out:
696     return;
697 }
698 #if 0
699 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
700 {
701         u32     t;
702         int     i;
703         /*
704          * Issue a command to enable writes to the NOVRAM
705          */
706         NVRAM_CMD (EXTEND + EWEN);
707         NVRAM_CLR_CE;
708         /*
709          * issue the write command
710          */
711         NVRAM_CMD(IAWRITE + addr);
712         /* 
713          * Send the data, starting with D15, then D14, and so on for 16 bits
714          */
715         for (i=15; i>=0; i--) {
716                 NVRAM_CLKOUT (val & 0x8000);
717                 val <<= 1;
718         }
719         NVRAM_CLR_CE;
720         CFG_OR(NVCE);
721         t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
722         while (!(t & NVDO))
723                 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
724
725         NVRAM_CLR_CE;
726         /*
727          * disable writes again
728          */
729         NVRAM_CMD(EXTEND + EWDS)
730         NVRAM_CLR_CE;
731         CFG_AND(~NVDI);
732 }
733 #endif
734
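/*
 * Read one 16-bit word from the serial EEPROM/NVRAM at the given word
 * address, clocking the data bits in MSB first.
 */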
735 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
736 {
737         u_short val;
738         u32     t;
739         int     i;
740         /*
741          * Read the first bit that was clocked with the falling edge of
742          * the last command data clock
743          */
744         NVRAM_CMD(IAREAD + addr);
745         /*
746          * Now read the rest of the bits, the next bit read is D14, then D13,
747          * and so on.
748          */
749         val = 0;
750         for (i=15; i>=0; i--) {
751                 NVRAM_CLKIN(t);
752                 val |= (t << i);
753         }
754         NVRAM_CLR_CE;
755         CFG_AND(~NVDI);
756         return val;
757 }
758
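/*
 * Derive the adapter configuration from EEPROM word 25: the packet memory
 * size selects how many TX/RX buffers (and of what size) are used, and the
 * front-end bits select the PHY type and hence the line rate in cells/sec
 * (UTP25, DS3, E3 or OC-3).
 */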
759 static void ia_hw_type(IADEV *iadev) {
760    u_short memType = ia_eeprom_get(iadev, 25);   
761    iadev->memType = memType;
762    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
763       iadev->num_tx_desc = IA_TX_BUF;
764       iadev->tx_buf_sz = IA_TX_BUF_SZ;
765       iadev->num_rx_desc = IA_RX_BUF;
766       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
767    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
768       if (IA_TX_BUF == DFL_TX_BUFFERS)
769         iadev->num_tx_desc = IA_TX_BUF / 2;
770       else 
771         iadev->num_tx_desc = IA_TX_BUF;
772       iadev->tx_buf_sz = IA_TX_BUF_SZ;
773       if (IA_RX_BUF == DFL_RX_BUFFERS)
774         iadev->num_rx_desc = IA_RX_BUF / 2;
775       else
776         iadev->num_rx_desc = IA_RX_BUF;
777       iadev->rx_buf_sz = IA_RX_BUF_SZ;
778    }
779    else {
780       if (IA_TX_BUF == DFL_TX_BUFFERS) 
781         iadev->num_tx_desc = IA_TX_BUF / 8;
782       else
783         iadev->num_tx_desc = IA_TX_BUF;
784       iadev->tx_buf_sz = IA_TX_BUF_SZ;
785       if (IA_RX_BUF == DFL_RX_BUFFERS)
786         iadev->num_rx_desc = IA_RX_BUF / 8;
787       else
788         iadev->num_rx_desc = IA_RX_BUF;
789       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
790    } 
791    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz); 
792    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
793          iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
794          iadev->rx_buf_sz, iadev->rx_pkt_ram);)
795
796 #if 0
797    if ((memType & FE_MASK) == FE_SINGLE_MODE)
798       iadev->phy_type = PHY_OC3C_S;
799    else if ((memType & FE_MASK) == FE_UTP_OPTION)
800       iadev->phy_type = PHY_UTP155;
801    else
802      iadev->phy_type = PHY_OC3C_M;
803 #endif
804    
805    iadev->phy_type = memType & FE_MASK;
806    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n", 
807                                          memType,iadev->phy_type);)
808    if (iadev->phy_type == FE_25MBIT_PHY) 
809       iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
810    else if (iadev->phy_type == FE_DS3_PHY)
811       iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
812    else if (iadev->phy_type == FE_E3_PHY) 
813       iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
814    else
815        iadev->LineRate = (u32)(ATM_OC3_PCR);
816    IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
817
818 }
819
820 static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
821 {
822         return readl(ia->phy + (reg >> 2));
823 }
824
825 static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
826 {
827         writel(val, ia->phy + (reg >> 2));
828 }
829
830 static void ia_frontend_intr(struct iadev_priv *iadev)
831 {
832         u32 status;
833
834         if (iadev->phy_type & FE_25MBIT_PHY) {
835                 status = ia_phy_read32(iadev, MB25_INTR_STATUS);
836                 iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
837         } else if (iadev->phy_type & FE_DS3_PHY) {
838                 ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
839                 status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
840                 iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
841         } else if (iadev->phy_type & FE_E3_PHY) {
842                 ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
843                 status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
844                 iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
845         } else {
846                 status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
847                 iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
848         }
849
850         printk(KERN_INFO "IA: SUNI carrier %s\n",
851                 iadev->carrier_detect ? "detected" : "lost signal");
852 }
853
854 static void ia_mb25_init(struct iadev_priv *iadev)
855 {
856 #if 0
857    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
858 #endif
859         ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
860         ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);
861
862         iadev->carrier_detect =
863                 (ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
864 }
865
866 struct ia_reg {
867         u16 reg;
868         u16 val;
869 };
870
871 static void ia_phy_write(struct iadev_priv *iadev,
872                          const struct ia_reg *regs, int len)
873 {
874         while (len--) {
875                 ia_phy_write32(iadev, regs->reg, regs->val);
876                 regs++;
877         }
878 }
879
880 static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
881 {
882         static const struct ia_reg suni_ds3_init [] = {
883                 { SUNI_DS3_FRM_INTR_ENBL,       0x17 },
884                 { SUNI_DS3_FRM_CFG,             0x01 },
885                 { SUNI_DS3_TRAN_CFG,            0x01 },
886                 { SUNI_CONFIG,                  0 },
887                 { SUNI_SPLR_CFG,                0 },
888                 { SUNI_SPLT_CFG,                0 }
889         };
890         u32 status;
891
892         status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
893         iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
894
895         ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
896 }
897
898 static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
899 {
900         static const struct ia_reg suni_e3_init [] = {
901                 { SUNI_E3_FRM_FRAM_OPTIONS,             0x04 },
902                 { SUNI_E3_FRM_MAINT_OPTIONS,            0x20 },
903                 { SUNI_E3_FRM_FRAM_INTR_ENBL,           0x1d },
904                 { SUNI_E3_FRM_MAINT_INTR_ENBL,          0x30 },
905                 { SUNI_E3_TRAN_STAT_DIAG_OPTIONS,       0 },
906                 { SUNI_E3_TRAN_FRAM_OPTIONS,            0x01 },
907                 { SUNI_CONFIG,                          SUNI_PM7345_E3ENBL },
908                 { SUNI_SPLR_CFG,                        0x41 },
909                 { SUNI_SPLT_CFG,                        0x41 }
910         };
911         u32 status;
912
913         status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
914         iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
915         ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
916 }
917
918 static void ia_suni_pm7345_init(struct iadev_priv *iadev)
919 {
920         static const struct ia_reg suni_init [] = {
921                 /* Enable RSOP loss of signal interrupt. */
922                 { SUNI_INTR_ENBL,               0x28 },
923                 /* Clear error counters. */
924                 { SUNI_ID_RESET,                0 },
925                 /* Clear "PMCTST" in master test register. */
926                 { SUNI_MASTER_TEST,             0 },
927
928                 { SUNI_RXCP_CTRL,               0x2c },
929                 { SUNI_RXCP_FCTRL,              0x81 },
930
931                 { SUNI_RXCP_IDLE_PAT_H1,        0 },
932                 { SUNI_RXCP_IDLE_PAT_H2,        0 },
933                 { SUNI_RXCP_IDLE_PAT_H3,        0 },
934                 { SUNI_RXCP_IDLE_PAT_H4,        0x01 },
935
936                 { SUNI_RXCP_IDLE_MASK_H1,       0xff },
937                 { SUNI_RXCP_IDLE_MASK_H2,       0xff },
938                 { SUNI_RXCP_IDLE_MASK_H3,       0xff },
939                 { SUNI_RXCP_IDLE_MASK_H4,       0xfe },
940
941                 { SUNI_RXCP_CELL_PAT_H1,        0 },
942                 { SUNI_RXCP_CELL_PAT_H2,        0 },
943                 { SUNI_RXCP_CELL_PAT_H3,        0 },
944                 { SUNI_RXCP_CELL_PAT_H4,        0x01 },
945
946                 { SUNI_RXCP_CELL_MASK_H1,       0xff },
947                 { SUNI_RXCP_CELL_MASK_H2,       0xff },
948                 { SUNI_RXCP_CELL_MASK_H3,       0xff },
949                 { SUNI_RXCP_CELL_MASK_H4,       0xff },
950
951                 { SUNI_TXCP_CTRL,               0xa4 },
952                 { SUNI_TXCP_INTR_EN_STS,        0x10 },
953                 { SUNI_TXCP_IDLE_PAT_H5,        0x55 }
954         };
955
956         if (iadev->phy_type & FE_DS3_PHY)
957                 ia_suni_pm7345_init_ds3(iadev);
958         else
959                 ia_suni_pm7345_init_e3(iadev);
960
961         ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));
962
963         ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
964                 ~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
965                   SUNI_PM7345_DLB | SUNI_PM7345_PLB));
966 #ifdef __SNMP__
967    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
968 #endif /* __SNMP__ */
969    return;
970 }
971
972
973 /***************************** IA_LIB END *****************************/
974     
975 #ifdef CONFIG_ATM_IA_DEBUG
976 static int tcnter = 0;
977 static void xdump( u_char*  cp, int  length, char*  prefix )
978 {
979     int col, count;
980     u_char prntBuf[120];
981     u_char*  pBuf = prntBuf;
982     count = 0;
983     while(count < length){
984         pBuf += sprintf( pBuf, "%s", prefix );
985         for(col = 0;count + col < length && col < 16; col++){
986             if (col != 0 && (col % 4) == 0)
987                 pBuf += sprintf( pBuf, " " );
988             pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
989         }
990         while(col++ < 16){      /* pad end of buffer with blanks */
991             if ((col % 4) == 0)
992                 sprintf( pBuf, " " );
993             pBuf += sprintf( pBuf, "   " );
994         }
995         pBuf += sprintf( pBuf, "  " );
996         for(col = 0;count + col < length && col < 16; col++){
997             if (isprint((int)cp[count + col]))
998                 pBuf += sprintf( pBuf, "%c", cp[count + col] );
999             else
1000                 pBuf += sprintf( pBuf, "." );
1001         }
1002         printk("%s\n", prntBuf);
1003         count += col;
1004         pBuf = prntBuf;
1005     }
1006
1007 }  /* close xdump(... */
1008 #endif /* CONFIG_ATM_IA_DEBUG */
1009
1010   
1011 static struct atm_dev *ia_boards = NULL;  
1012   
1013 #define ACTUAL_RAM_BASE \
1014         RAM_BASE*((iadev->mem)/(128 * 1024))  
1015 #define ACTUAL_SEG_RAM_BASE \
1016         IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1017 #define ACTUAL_REASS_RAM_BASE \
1018         IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1019   
1020   
1021 /*-- some utilities and memory allocation stuff will come here -------------*/  
1022   
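/*
 * Debug helper: dump the TCQ read/write pointers, the descriptors currently
 * queued in the TCQ, and the timestamp of every entry in the descriptor
 * table.
 */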
1023 static void desc_dbg(IADEV *iadev) {
1024
1025   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1026   u32 i;
1027   void __iomem *tmp;
1028   // regval = readl((u32)ia_cmds->maddr);
1029   tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1030   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1031                      tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1032                      readw(iadev->seg_ram+tcq_wr_ptr-2));
1033   printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr, 
1034                    iadev->ffL.tcq_rd);
1035   tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1036   tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1037   printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1038   i = 0;
1039   while (tcq_st_ptr != tcq_ed_ptr) {
1040       tmp = iadev->seg_ram+tcq_st_ptr;
1041       printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
1042       tcq_st_ptr += 2;
1043   }
1044   for(i=0; i <iadev->num_tx_desc; i++)
1045       printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1046 }
1047   
1048   
1049 /*----------------------------- Receiving side stuff --------------------------*/  
1050  
1051 static void rx_excp_rcvd(struct atm_dev *dev)  
1052 {  
1053 #if 0 /* closing the receiving size will cause too many excp int */  
1054   IADEV *iadev;  
1055   u_short state;  
1056   u_short excpq_rd_ptr;  
1057   //u_short *ptr;  
1058   int vci, error = 1;  
1059   iadev = INPH_IA_DEV(dev);  
1060   state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1061   while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)  
1062   { printk("state = %x \n", state); 
1063         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;  
1064  printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr); 
1065         if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1066             IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1067         // TODO: update exception stat
1068         vci = readw(iadev->reass_ram+excpq_rd_ptr);  
1069         error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;  
1070         // pwang_test
1071         excpq_rd_ptr += 4;  
1072         if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))  
1073             excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1074         writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);  
1075         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1076   }  
1077 #endif
1078 }  
1079   
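/*
 * Return a receive buffer descriptor to the free descriptor queue and advance
 * the free-queue write pointer, wrapping at the end of the queue.
 */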
1080 static void free_desc(struct atm_dev *dev, int desc)  
1081 {  
1082         IADEV *iadev;  
1083         iadev = INPH_IA_DEV(dev);  
1084         writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr); 
1085         iadev->rfL.fdq_wr +=2;
1086         if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1087                 iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;  
1088         writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);  
1089 }  
1090   
1091   
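/*
 * Service one entry of the packet complete queue: fetch the completed
 * descriptor number, validate it, look up the owning VCC, allocate and charge
 * an skb for the PDU, queue the skb on rx_dma_q and build a DLE so the
 * adapter DMAs the reassembled data into host memory.
 */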
1092 static int rx_pkt(struct atm_dev *dev)  
1093 {  
1094         IADEV *iadev;  
1095         struct atm_vcc *vcc;  
1096         unsigned short status;  
1097         struct rx_buf_desc __iomem *buf_desc_ptr;  
1098         int desc;   
1099         struct dle* wr_ptr;  
1100         int len;  
1101         struct sk_buff *skb;  
1102         u_int buf_addr, dma_addr;  
1103
1104         iadev = INPH_IA_DEV(dev);  
1105         if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff)) 
1106         {  
1107             printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);  
1108             return -EINVAL;  
1109         }  
1110         /* mask 1st 3 bits to get the actual descno. */  
1111         desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;  
1112         IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n", 
1113                                     iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1114               printk(" pcq_wr_ptr = 0x%x\n",
1115                                readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1116         /* update the read pointer - maybe we should do this at the end */  
1117         if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed) 
1118                 iadev->rfL.pcq_rd = iadev->rfL.pcq_st;  
1119         else  
1120                 iadev->rfL.pcq_rd += 2;
1121         writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);  
1122   
1123         /* get the buffer desc entry.  
1124                 update stuff. - doesn't seem to be any update necessary  
1125         */  
1126         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1127         /* make the ptr point to the corresponding buffer desc entry */  
1128         buf_desc_ptr += desc;     
1129         if (!desc || (desc > iadev->num_rx_desc) || 
1130                       ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { 
1131             free_desc(dev, desc);
1132             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1133             return -1;
1134         }
1135         vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];  
1136         if (!vcc)  
1137         {      
1138                 free_desc(dev, desc); 
1139                 printk("IA: null vcc, drop PDU\n");  
1140                 return -1;  
1141         }  
1142           
1143   
1144         /* might want to check the status bits for errors */  
1145         status = (u_short) (buf_desc_ptr->desc_mode);  
1146         if (status & (RX_CER | RX_PTE | RX_OFL))  
1147         {  
1148                 atomic_inc(&vcc->stats->rx_err);
1149                 IF_ERR(printk("IA: bad packet, dropping it");)  
1150                 if (status & RX_CER) { 
1151                     IF_ERR(printk(" cause: packet CRC error\n");)
1152                 }
1153                 else if (status & RX_PTE) {
1154                     IF_ERR(printk(" cause: packet time out\n");)
1155                 }
1156                 else {
1157                     IF_ERR(printk(" cause: buffer overflow\n");)
1158                 }
1159                 goto out_free_desc;
1160         }  
1161   
1162         /*  
1163                 build DLE.        
1164         */  
1165   
1166         buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;  
1167         dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;  
1168         len = dma_addr - buf_addr;  
1169         if (len > iadev->rx_buf_sz) {
1170            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1171            atomic_inc(&vcc->stats->rx_err);
1172            goto out_free_desc;
1173         }
1174                   
1175         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1176            if (vcc->vci < 32)
1177               printk("Drop control packets\n");
1178            goto out_free_desc;
1179         }
1180         skb_put(skb,len);  
1181         // pwang_test
1182         ATM_SKB(skb)->vcc = vcc;
1183         ATM_DESC(skb) = desc;        
1184         skb_queue_tail(&iadev->rx_dma_q, skb);  
1185
1186         /* Build the DLE structure */  
1187         wr_ptr = iadev->rx_dle_q.write;  
1188         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
1189                 len, PCI_DMA_FROMDEVICE);
1190         wr_ptr->local_pkt_addr = buf_addr;  
1191         wr_ptr->bytes = len;    /* We don't know this do we ?? */  
1192         wr_ptr->mode = DMA_INT_ENABLE;  
1193   
1194         /* should take care of wrap around here too. */  
1195         if(++wr_ptr == iadev->rx_dle_q.end)
1196              wr_ptr = iadev->rx_dle_q.start;
1197         iadev->rx_dle_q.write = wr_ptr;  
1198         udelay(1);  
1199         /* Increment transaction counter */  
1200         writel(1, iadev->dma+IPHASE5575_RX_COUNTER);   
1201 out:    return 0;  
1202 out_free_desc:
1203         free_desc(dev, desc);
1204         goto out;
1205 }  
1206   
1207 static void rx_intr(struct atm_dev *dev)  
1208 {  
1209   IADEV *iadev;  
1210   u_short status;  
1211   u_short state, i;  
1212   
1213   iadev = INPH_IA_DEV(dev);  
1214   status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;  
1215   IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1216   if (status & RX_PKT_RCVD)  
1217   {  
1218         /* do something */  
1219         /* Basically recvd an interrupt for receiving a packet.  
1220         A descriptor would have been written to the packet complete   
1221         queue. Get all the descriptors and set up dma to move the   
1222         packets till the packet complete queue is empty..  
1223         */  
1224         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1225         IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);) 
1226         while(!(state & PCQ_EMPTY))  
1227         {  
1228              rx_pkt(dev);  
1229              state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1230         }  
1231         iadev->rxing = 1;
1232   }  
1233   if (status & RX_FREEQ_EMPT)  
1234   {   
1235      if (iadev->rxing) {
1236         iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1237         iadev->rx_tmp_jif = jiffies; 
1238         iadev->rxing = 0;
1239      } 
1240      else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
1241                ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1242         for (i = 1; i <= iadev->num_rx_desc; i++)
1243                free_desc(dev, i);
1244 printk("Test logic RUN!!!!\n");
1245         writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1246         iadev->rxing = 1;
1247      }
1248      IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)  
1249   }  
1250
1251   if (status & RX_EXCP_RCVD)  
1252   {  
1253         /* probably need to handle the exception queue also. */  
1254         IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)  
1255         rx_excp_rcvd(dev);  
1256   }  
1257
1258
1259   if (status & RX_RAW_RCVD)  
1260   {  
1261         /* need to handle the raw incoming cells. This depends on   
1262         whether we have programmed to receive the raw cells or not.  
1263         Otherwise ignore them. */  
1264         IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)  
1265   }  
1266 }  
1267   
1268   
1269 static void rx_dle_intr(struct atm_dev *dev)  
1270 {  
1271   IADEV *iadev;  
1272   struct atm_vcc *vcc;   
1273   struct sk_buff *skb;  
1274   int desc;  
1275   u_short state;   
1276   struct dle *dle, *cur_dle;  
1277   u_int dle_lp;  
1278   int len;
1279   iadev = INPH_IA_DEV(dev);  
1280  
1281   /* free all the dles done, that is just update our own dle read pointer   
1282         - do we really need to do this. Think not. */  
1283   /* DMA is done, just get all the receive buffers from the rx dma queue  
1284         and push them up to the higher layer protocol. Also free the desc  
1285         associated with the buffer. */  
1286   dle = iadev->rx_dle_q.read;  
1287   dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);  
1288   cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));  
1289   while(dle != cur_dle)  
1290   {  
1291       /* free the DMAed skb */  
1292       skb = skb_dequeue(&iadev->rx_dma_q);  
1293       if (!skb)  
1294          goto INCR_DLE;
1295       desc = ATM_DESC(skb);
1296       free_desc(dev, desc);  
1297                
1298       if (!(len = skb->len))
1299       {  
1300           printk("rx_dle_intr: skb len 0\n");  
1301           dev_kfree_skb_any(skb);  
1302       }  
1303       else  
1304       {  
1305           struct cpcs_trailer *trailer;
1306           u_short length;
1307           struct ia_vcc *ia_vcc;
1308
1309           pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
1310                 len, PCI_DMA_FROMDEVICE);
1311           /* no VCC related housekeeping done as yet. lets see */  
1312           vcc = ATM_SKB(skb)->vcc;
1313           if (!vcc) {
1314               printk("IA: null vcc\n");  
1315               dev_kfree_skb_any(skb);
1316               goto INCR_DLE;
1317           }
1318           ia_vcc = INPH_IA_VCC(vcc);
1319           if (ia_vcc == NULL)
1320           {
1321              atomic_inc(&vcc->stats->rx_err);
1322              atm_return(vcc, skb->truesize);
1323              dev_kfree_skb_any(skb);
1324              goto INCR_DLE;
1325            }
1326           // get real pkt length  pwang_test
1327           trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1328                                  skb->len - sizeof(*trailer));
1329           length = swap_byte_order(trailer->length);
1330           if ((length > iadev->rx_buf_sz) || (length > 
1331                               (skb->len - sizeof(struct cpcs_trailer))))
1332           {
1333              atomic_inc(&vcc->stats->rx_err);
1334              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
1335                                                             length, skb->len);)
1336              atm_return(vcc, skb->truesize);
1337              dev_kfree_skb_any(skb);
1338              goto INCR_DLE;
1339           }
1340           skb_trim(skb, length);
1341           
1342           /* Display the packet */  
1343           IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);  
1344           xdump(skb->data, skb->len, "RX: ");
1345           printk("\n");)
1346
1347           IF_RX(printk("rx_dle_intr: skb push");)  
1348           vcc->push(vcc,skb);  
1349           atomic_inc(&vcc->stats->rx);
1350           iadev->rx_pkt_cnt++;
1351       }  
1352 INCR_DLE:
1353       if (++dle == iadev->rx_dle_q.end)  
1354           dle = iadev->rx_dle_q.start;  
1355   }  
1356   iadev->rx_dle_q.read = dle;  
1357   
1358   /* if the interrupts are masked because there were no free desc available,  
1359                 unmask them now. */ 
1360   if (!iadev->rxing) {
1361      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1362      if (!(state & FREEQ_EMPTY)) {
1363         state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1364         writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1365                                       iadev->reass_reg+REASS_MASK_REG);
1366         iadev->rxing++; 
1367      }
1368   }
1369 }  
1370   
1371   
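/*
 * Receive-side setup for a newly opened VC: mark its entry in the RX VC
 * table, program the ABR receive parameters for ABR VCs (or initialize the
 * reassembly table entry otherwise), and record the VCC in rx_open[] so
 * incoming PDUs can be matched to it.
 */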
1372 static int open_rx(struct atm_vcc *vcc)  
1373 {  
1374         IADEV *iadev;  
1375         u_short __iomem *vc_table;  
1376         u_short __iomem *reass_ptr;  
1377         IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1378
1379         if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;    
1380         iadev = INPH_IA_DEV(vcc->dev);  
1381         if (vcc->qos.rxtp.traffic_class == ATM_ABR) {  
1382            if (iadev->phy_type & FE_25MBIT_PHY) {
1383                printk("IA: ABR not supported\n");
1384                return -EINVAL; 
1385            }
1386         }
1387         /* Make only this VCI in the vc table valid and let all   
1388                 others be invalid entries */  
1389         vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1390         vc_table += vcc->vci;
1391         /* mask the last 6 bits and OR it with 3 for 1K VCs */  
1392
1393         *vc_table = vcc->vci << 6;
1394         /* Also keep a list of open rx vcs so that we can attach them with  
1395                 incoming PDUs later. */  
1396         if ((vcc->qos.rxtp.traffic_class == ATM_ABR) || 
1397                                 (vcc->qos.txtp.traffic_class == ATM_ABR))  
1398         {  
1399                 srv_cls_param_t srv_p;
1400                 init_abr_vc(iadev, &srv_p);
1401                 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1402         } 
1403         else {  /* for UBR  later may need to add CBR logic */
1404                 reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1405                 reass_ptr += vcc->vci;
1406                 *reass_ptr = NO_AAL5_PKT;
1407         }
1408         
1409         if (iadev->rx_open[vcc->vci])  
1410                 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",  
1411                         vcc->dev->number, vcc->vci);  
1412         iadev->rx_open[vcc->vci] = vcc;  
1413         return 0;  
1414 }  
1415   
1416 static int rx_init(struct atm_dev *dev)  
1417 {  
1418         IADEV *iadev;  
1419         struct rx_buf_desc __iomem *buf_desc_ptr;  
1420         unsigned long rx_pkt_start = 0;  
1421         void *dle_addr;  
1422         struct abr_vc_table  *abr_vc_table; 
1423         u16 *vc_table;  
1424         u16 *reass_table;  
1425         int i,j, vcsize_sel;  
1426         u_short freeq_st_adr;  
1427         u_short *freeq_start;  
1428   
1429         iadev = INPH_IA_DEV(dev);  
1430   //    spin_lock_init(&iadev->rx_lock); 
1431   
1432         /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1433         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1434                                         &iadev->rx_dle_dma);  
1435         if (!dle_addr)  {  
1436                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1437                 goto err_out;
1438         }
1439         iadev->rx_dle_q.start = (struct dle *)dle_addr;
1440         iadev->rx_dle_q.read = iadev->rx_dle_q.start;  
1441         iadev->rx_dle_q.write = iadev->rx_dle_q.start;  
1442         iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1443         /* the end of the dle q points to the entry after the last  
1444         DLE that can be used. */  
1445   
1446         /* write the upper 20 bits of the start address to rx list address register */  
1447         /* We know this is 32bit bus addressed so the following is safe */
1448         writel(iadev->rx_dle_dma & 0xfffff000,
1449                iadev->dma + IPHASE5575_RX_LIST_ADDR);  
1450         IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
1451                       iadev->dma+IPHASE5575_TX_LIST_ADDR,
1452                       readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
1453         printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
1454                       iadev->dma+IPHASE5575_RX_LIST_ADDR,
1455                       readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)
1456   
1457         writew(0xffff, iadev->reass_reg+REASS_MASK_REG);  
1458         writew(0, iadev->reass_reg+MODE_REG);  
1459         writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);  
1460   
1461         /* Receive side control memory map  
1462            -------------------------------  
1463   
1464                 Buffer descr    0x0000 (736 - 23K)  
1465                 VP Table        0x5c00 (256 - 512)  
1466                 Except q        0x5e00 (128 - 512)  
1467                 Free buffer q   0x6000 (1K - 2K)  
1468                 Packet comp q   0x6800 (1K - 2K)  
1469                 Reass Table     0x7000 (1K - 2K)  
1470                 VC Table        0x7800 (1K - 2K)  
1471                 ABR VC Table    0x8000 (1K - 32K)  
1472         */  
1473           
1474         /* Base address for Buffer Descriptor Table */  
1475         writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);  
1476         /* Set the buffer size register */  
1477         writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);  
1478   
1479         /* Initialize each entry in the Buffer Descriptor Table */  
1480         iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1481         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1482         memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1483         buf_desc_ptr++;  
1484         rx_pkt_start = iadev->rx_pkt_ram;  
1485         for(i=1; i<=iadev->num_rx_desc; i++)  
1486         {  
1487                 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1488                 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;  
1489                 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;  
1490                 buf_desc_ptr++;           
1491                 rx_pkt_start += iadev->rx_buf_sz;  
1492         }  
1493         IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
1494         i = FREE_BUF_DESC_Q*iadev->memSize; 
1495         writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE); 
1496         writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1497         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1498                                          iadev->reass_reg+FREEQ_ED_ADR);
1499         writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1500         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1501                                         iadev->reass_reg+FREEQ_WR_PTR);    
1502         /* Fill the FREEQ with all the free descriptors. */  
1503         freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);  
1504         freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);  
1505         for(i=1; i<=iadev->num_rx_desc; i++)  
1506         {  
1507                 *freeq_start = (u_short)i;  
1508                 freeq_start++;  
1509         }  
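        /* Note: the free queue is seeded with descriptor numbers 1..num_rx_desc;
           descriptor 0 (the table entry zeroed and skipped above) is never
           placed on the free queue. */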
1510         IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
1511         /* Packet Complete Queue */
1512         i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1513         writew(i, iadev->reass_reg+PCQ_ST_ADR);
1514         writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1515         writew(i, iadev->reass_reg+PCQ_RD_PTR);
1516         writew(i, iadev->reass_reg+PCQ_WR_PTR);
1517
1518         /* Exception Queue */
1519         i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1520         writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1521         writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q), 
1522                                              iadev->reass_reg+EXCP_Q_ED_ADR);
1523         writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1524         writew(i, iadev->reass_reg+EXCP_Q_WR_PTR); 
1525  
1526         /* Load local copy of FREEQ and PCQ ptrs */
1527         iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1528         iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1529         iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1530         iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1531         iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1532         iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1533         iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1534         iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1535         
1536         IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x", 
1537               iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd, 
1538               iadev->rfL.pcq_wr);)                
1539         /* just for check - no VP TBL */  
1540         /* VP Table */  
1541         /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */  
1542         /* initialize VP Table for invalid VPIs  
1543                 - I guess we can write all 1s or 0x000f in the entire memory  
1544                   space or something similar.  
1545         */  
1546   
1547         /* This seems to work and looks right to me too !!! */  
1548         i =  REASS_TABLE * iadev->memSize;
1549         writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);   
1550         /* initialize Reassembly table to I don't know what ???? */  
1551         reass_table = (u16 *)(iadev->reass_ram+i);  
1552         j = REASS_TABLE_SZ * iadev->memSize;
1553         for(i=0; i < j; i++)  
1554                 *reass_table++ = NO_AAL5_PKT;  
1555        i = 8*1024;
1556        vcsize_sel =  0;
1557        while (i != iadev->num_vc) {
1558           i /= 2;
1559           vcsize_sel++;
1560        }
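       /* Worked example of the loop above: it computes
          vcsize_sel = log2(8192 / num_vc), so a 1K-VC board (num_vc = 1024)
          gives vcsize_sel = 3 and a 4K-VC board gives vcsize_sel = 1; the
          value is OR'ed into the low bits of VC_LKUP_BASE just below. */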
1561        i = RX_VC_TABLE * iadev->memSize;
1562        writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1563        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1564         j = RX_VC_TABLE_SZ * iadev->memSize;
1565         for(i = 0; i < j; i++)  
1566         {  
1567                 /* shift the reassembly pointer by 3 + lower 3 bits of   
1568                 vc_lkup_base register (=3 for 1K VCs) and the last byte   
1569                 is those low 3 bits.   
1570                 Shall program this later.  
1571                 */  
1572                 *vc_table = (i << 6) | 15;      /* for invalid VCI */  
1573                 vc_table++;  
1574         }  
1575         /* ABR VC table */
1576         i =  ABR_VC_TABLE * iadev->memSize;
1577         writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1578                    
1579         i = ABR_VC_TABLE * iadev->memSize;
1580         abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);  
1581         j = REASS_TABLE_SZ * iadev->memSize;
1582         memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1583         for(i = 0; i < j; i++) {                
1584                 abr_vc_table->rdf = 0x0003;
1585                 abr_vc_table->air = 0x5eb1;
1586                 abr_vc_table++;         
1587         }  
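        /* The 0x0003 / 0x5eb1 pair written above is the reset default for
           every ABR VC entry - presumably RDF (rate decrease factor) and AIR
           (additive increase rate) in the (i)Chip's encoding; ia_close()
           restores the same pair when an ABR VC is torn down. */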
1588
1589         /* Initialize other registers */  
1590   
1591         /* VP Filter Register set for VC Reassembly only */  
1592         writew(0xff00, iadev->reass_reg+VP_FILTER);  
1593         writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1594         writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1595
1596         /* Packet Timeout Count  related Registers : 
1597            Set packet timeout to occur in about 3 seconds
1598            Set Packet Aging Interval count register to overflow in about 4 us
1599         */  
1600         writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1601
1602         i = (j >> 6) & 0xFF;
1603         j += 2 * (j - 1);
1604         i |= ((j << 2) & 0xFF00);
1605         writew(i, iadev->reass_reg+TMOUT_RANGE);
1606
1607         /* initialize the desc_tbl */
1608         for(i=0; i<iadev->num_tx_desc;i++)
1609             iadev->desc_tbl[i].timestamp = 0;
1610
1611         /* to clear the interrupt status register - read it */  
1612         readw(iadev->reass_reg+REASS_INTR_STATUS_REG);   
1613   
1614         /* Mask Register - clear it */  
1615         writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);  
1616   
1617         skb_queue_head_init(&iadev->rx_dma_q);  
1618         iadev->rx_free_desc_qhead = NULL;   
1619
1620         iadev->rx_open = kzalloc(iadev->num_vc * sizeof(struct atm_vcc *), GFP_KERNEL);
1621         if (!iadev->rx_open) {
1622                 printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1623                 dev->number);  
1624                 goto err_free_dle;
1625         }  
1626
1627         iadev->rxing = 1;
1628         iadev->rx_pkt_cnt = 0;
1629         /* Mode Register */  
1630         writew(R_ONLINE, iadev->reass_reg+MODE_REG);  
1631         return 0;  
1632
1633 err_free_dle:
1634         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1635                             iadev->rx_dle_dma);  
1636 err_out:
1637         return -ENOMEM;
1638 }  
1639   
1640
1641 /*  
1642         The memory map suggested in appendix A and the coding for it.   
1643         Keeping it around just in case we change our mind later.  
1644   
1645                 Buffer descr    0x0000 (128 - 4K)  
1646                 UBR sched       0x1000 (1K - 4K)  
1647                 UBR Wait q      0x2000 (1K - 4K)  
1648                 Commn queues    0x3000 Packet Ready, Transmit comp(0x3100)  
1649                                         (128 - 256) each  
1650                 extended VC     0x4000 (1K - 8K)  
1651                 ABR sched       0x6000  and ABR wait queue (1K - 2K) each  
1652                 CBR sched       0x7000 (as needed)  
1653                 VC table        0x8000 (1K - 32K)  
1654 */  
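/* Note: the offsets actually programmed below come from the TX_* and *_TABLE
   macros (from iphase.h) scaled by iadev->memSize, not from the appendix-A
   map kept above for reference. */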
1655   
1656 static void tx_intr(struct atm_dev *dev)  
1657 {  
1658         IADEV *iadev;  
1659         unsigned short status;  
1660         unsigned long flags;
1661
1662         iadev = INPH_IA_DEV(dev);  
1663   
1664         status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);  
1665         if (status & TRANSMIT_DONE){
1666
1667            IF_EVENT(printk("Transmit Done Intr logic run\n");)
1668            spin_lock_irqsave(&iadev->tx_lock, flags);
1669            ia_tx_poll(iadev);
1670            spin_unlock_irqrestore(&iadev->tx_lock, flags);
1671            writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1672            if (iadev->close_pending)  
1673                wake_up(&iadev->close_wait);
1674         }         
1675         if (status & TCQ_NOT_EMPTY)  
1676         {  
1677             IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)  
1678         }  
1679 }  
1680   
1681 static void tx_dle_intr(struct atm_dev *dev)
1682 {
1683         IADEV *iadev;
1684         struct dle *dle, *cur_dle; 
1685         struct sk_buff *skb;
1686         struct atm_vcc *vcc;
1687         struct ia_vcc  *iavcc;
1688         u_int dle_lp;
1689         unsigned long flags;
1690
1691         iadev = INPH_IA_DEV(dev);
1692         spin_lock_irqsave(&iadev->tx_lock, flags);   
1693         dle = iadev->tx_dle_q.read;
1694         dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & 
1695                                         (sizeof(struct dle)*DLE_ENTRIES - 1);
1696         cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
1697         while (dle != cur_dle)
1698         {
1699             /* free the DMAed skb */ 
1700             skb = skb_dequeue(&iadev->tx_dma_q); 
1701             if (!skb) break;
1702
1703             /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
1704             if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1705                 pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
1706                                  PCI_DMA_TODEVICE);
1707             }
1708             vcc = ATM_SKB(skb)->vcc;
1709             if (!vcc) {
1710                   printk("tx_dle_intr: vcc is null\n");
1711                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1712                   dev_kfree_skb_any(skb);
1713
1714                   return;
1715             }
1716             iavcc = INPH_IA_VCC(vcc);
1717             if (!iavcc) {
1718                   printk("tx_dle_intr: iavcc is null\n");
1719                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1720                   dev_kfree_skb_any(skb);
1721                   return;
1722             }
1723             if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1724                if ((vcc->pop) && (skb->len != 0))
1725                {     
1726                  vcc->pop(vcc, skb);
1727                } 
1728                else {
1729                  dev_kfree_skb_any(skb);
1730                }
1731             }
1732             else { /* Hold the rate-limited skb for flow control */
1733                IA_SKB_STATE(skb) |= IA_DLED;
1734                skb_queue_tail(&iavcc->txing_skb, skb);
1735             }
1736             IF_EVENT(printk("tx_dle_intr: enqueue skb = 0x%p \n", skb);)
1737             if (++dle == iadev->tx_dle_q.end)
1738                  dle = iadev->tx_dle_q.start;
1739         }
1740         iadev->tx_dle_q.read = dle;
1741         spin_unlock_irqrestore(&iadev->tx_lock, flags);
1742 }
1743   
1744 static int open_tx(struct atm_vcc *vcc)  
1745 {  
1746         struct ia_vcc *ia_vcc;  
1747         IADEV *iadev;  
1748         struct main_vc *vc;  
1749         struct ext_vc *evc;  
1750         int ret;
1751         IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)  
1752         if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;  
1753         iadev = INPH_IA_DEV(vcc->dev);  
1754         
1755         if (iadev->phy_type & FE_25MBIT_PHY) {
1756            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1757                printk("IA:  ABR not supported\n");
1758                return -EINVAL; 
1759            }
1760           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1761                printk("IA:  CBR not supported\n");
1762                return -EINVAL; 
1763           }
1764         }
1765         ia_vcc =  INPH_IA_VCC(vcc);
1766         memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1767         if (vcc->qos.txtp.max_sdu > 
1768                          (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1769            printk("IA:  SDU size %d exceeds the configured SDU size %d\n",
1770                   vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
1771            vcc->dev_data = NULL;
1772            kfree(ia_vcc);
1773            return -EINVAL; 
1774         }
1775         ia_vcc->vc_desc_cnt = 0;
1776         ia_vcc->txing = 1;
1777
1778         /* find pcr */
1779         if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) 
1780            vcc->qos.txtp.pcr = iadev->LineRate;
1781         else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1782            vcc->qos.txtp.pcr = iadev->LineRate;
1783         else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0)) 
1784            vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1785         if (vcc->qos.txtp.pcr > iadev->LineRate)
1786              vcc->qos.txtp.pcr = iadev->LineRate;
1787         ia_vcc->pcr = vcc->qos.txtp.pcr;
1788
1789         if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1790         else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1791         else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1792         else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
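        /* ltimeout is the per-VC flow-control timeout in jiffies, scaled
           roughly inversely to the cell rate: fast VCs (above LineRate/6)
           wait only HZ/10, very slow ones up to 16*HZ, and 2700*HZ/pcr
           covers the middle range. */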
1793         if (ia_vcc->pcr < iadev->rate_limit)
1794            skb_queue_head_init (&ia_vcc->txing_skb);
1795         if (ia_vcc->pcr < iadev->rate_limit) {
1796            struct sock *sk = sk_atm(vcc);
1797
1798            if (vcc->qos.txtp.max_sdu != 0) {
1799                if (ia_vcc->pcr > 60000)
1800                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1801                else if (ia_vcc->pcr > 2000)
1802                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1803                else
1804                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1805            }
1806            else
1807              sk->sk_sndbuf = 24576;
1808         }
1809            
1810         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
1811         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
1812         vc += vcc->vci;  
1813         evc += vcc->vci;  
1814         memset((caddr_t)vc, 0, sizeof(*vc));  
1815         memset((caddr_t)evc, 0, sizeof(*evc));  
1816           
1817         /* store the most significant 4 bits of vci as the last 4 bits   
1818                 of first part of atm header.  
1819            store the last 12 bits of vci as first 12 bits of the second  
1820                 part of the atm header.  
1821         */  
1822         evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;  
1823         evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;  
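        /* Worked example: for vci 0x1234, atm_hdr1 = 0x1 (the top 4 bits)
           and atm_hdr2 = 0x2340 (the low 12 bits shifted up by 4). */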
1824  
1825         /* check the following for different traffic classes */  
1826         if (vcc->qos.txtp.traffic_class == ATM_UBR)  
1827         {  
1828                 vc->type = UBR;  
1829                 vc->status = CRC_APPEND;
1830                 vc->acr = cellrate_to_float(iadev->LineRate);  
1831                 if (vcc->qos.txtp.pcr > 0) 
1832                    vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);  
1833                 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n", 
1834                                              vcc->qos.txtp.max_pcr,vc->acr);)
1835         }  
1836         else if (vcc->qos.txtp.traffic_class == ATM_ABR)  
1837         {       srv_cls_param_t srv_p;
1838                 IF_ABR(printk("Tx ABR VCC\n");)  
1839                 init_abr_vc(iadev, &srv_p);
1840                 if (vcc->qos.txtp.pcr > 0) 
1841                    srv_p.pcr = vcc->qos.txtp.pcr;
1842                 if (vcc->qos.txtp.min_pcr > 0) {
1843                    int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1844                    if (tmpsum > iadev->LineRate)
1845                        return -EBUSY;
1846                    srv_p.mcr = vcc->qos.txtp.min_pcr;
1847                    iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1848                 } 
1849                 else srv_p.mcr = 0;
1850                 if (vcc->qos.txtp.icr)
1851                    srv_p.icr = vcc->qos.txtp.icr;
1852                 if (vcc->qos.txtp.tbe)
1853                    srv_p.tbe = vcc->qos.txtp.tbe;
1854                 if (vcc->qos.txtp.frtt)
1855                    srv_p.frtt = vcc->qos.txtp.frtt;
1856                 if (vcc->qos.txtp.rif)
1857                    srv_p.rif = vcc->qos.txtp.rif;
1858                 if (vcc->qos.txtp.rdf)
1859                    srv_p.rdf = vcc->qos.txtp.rdf;
1860                 if (vcc->qos.txtp.nrm_pres)
1861                    srv_p.nrm = vcc->qos.txtp.nrm;
1862                 if (vcc->qos.txtp.trm_pres)
1863                    srv_p.trm = vcc->qos.txtp.trm;
1864                 if (vcc->qos.txtp.adtf_pres)
1865                    srv_p.adtf = vcc->qos.txtp.adtf;
1866                 if (vcc->qos.txtp.cdf_pres)
1867                    srv_p.cdf = vcc->qos.txtp.cdf;    
1868                 if (srv_p.icr > srv_p.pcr)
1869                    srv_p.icr = srv_p.pcr;    
1870                 IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n", 
1871                                                       srv_p.pcr, srv_p.mcr);)
1872                 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1873         } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1874                 if (iadev->phy_type & FE_25MBIT_PHY) {
1875                     printk("IA:  CBR not supported\n");
1876                     return -EINVAL; 
1877                 }
1878                 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1879                    IF_CBR(printk("PCR is not available\n");)
1880                    return -1;
1881                 }
1882                 vc->type = CBR;
1883                 vc->status = CRC_APPEND;
1884                 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
1885                     return ret;
1886                 }
1887        } 
1888         else  
1889            printk("iadev:  Traffic other than UBR, ABR and CBR is not supported\n"); 
1890         
1891         iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1892         IF_EVENT(printk("ia open_tx returning \n");)  
1893         return 0;  
1894 }  
1895   
1896   
1897 static int tx_init(struct atm_dev *dev)  
1898 {  
1899         IADEV *iadev;  
1900         struct tx_buf_desc *buf_desc_ptr;
1901         unsigned int tx_pkt_start;  
1902         void *dle_addr;  
1903         int i;  
1904         u_short tcq_st_adr;  
1905         u_short *tcq_start;  
1906         u_short prq_st_adr;  
1907         u_short *prq_start;  
1908         struct main_vc *vc;  
1909         struct ext_vc *evc;   
1910         u_short tmp16;
1911         u32 vcsize_sel;
1912  
1913         iadev = INPH_IA_DEV(dev);  
1914         spin_lock_init(&iadev->tx_lock);
1915  
1916         IF_INIT(printk("Tx MASK REG: 0x%0x\n", 
1917                                 readw(iadev->seg_reg+SEG_MASK_REG));)  
1918
1919         /* Allocate 4k (boundary aligned) bytes */
1920         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1921                                         &iadev->tx_dle_dma);  
1922         if (!dle_addr)  {
1923                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1924                 goto err_out;
1925         }
1926         iadev->tx_dle_q.start = (struct dle*)dle_addr;  
1927         iadev->tx_dle_q.read = iadev->tx_dle_q.start;  
1928         iadev->tx_dle_q.write = iadev->tx_dle_q.start;  
1929         iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1930
1931         /* write the upper 20 bits of the start address to tx list address register */  
1932         writel(iadev->tx_dle_dma & 0xfffff000,
1933                iadev->dma + IPHASE5575_TX_LIST_ADDR);  
1934         writew(0xffff, iadev->seg_reg+SEG_MASK_REG);  
1935         writew(0, iadev->seg_reg+MODE_REG_0);  
1936         writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);  
1937         iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1938         iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1939         iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1940   
1941         /*  
1942            Transmit side control memory map  
1943            --------------------------------    
1944          Buffer descr   0x0000 (128 - 4K)  
1945          Commn queues   0x1000  Transmit comp, Packet ready(0x1400)   
1946                                         (512 - 1K) each  
1947                                         TCQ - 4K, PRQ - 5K  
1948          CBR Table      0x1800 (as needed) - 6K  
1949          UBR Table      0x3000 (1K - 4K) - 12K  
1950          UBR Wait queue 0x4000 (1K - 4K) - 16K  
1951          ABR sched      0x5000  and ABR wait queue (1K - 2K) each  
1952                                 ABR Tbl - 20K, ABR Wq - 22K   
1953          extended VC    0x6000 (1K - 8K) - 24K  
1954          VC Table       0x8000 (1K - 32K) - 32K  
1955           
1956         Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl  
1957         and Wait q, which can be allotted later.  
1958         */  
1959      
1960         /* Buffer Descriptor Table Base address */  
1961         writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);  
1962   
1963         /* initialize each entry in the buffer descriptor table */  
1964         buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
1965         memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1966         buf_desc_ptr++;  
1967         tx_pkt_start = TX_PACKET_RAM;  
1968         for(i=1; i<=iadev->num_tx_desc; i++)  
1969         {  
1970                 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1971                 buf_desc_ptr->desc_mode = AAL5;  
1972                 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;  
1973                 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;  
1974                 buf_desc_ptr++;           
1975                 tx_pkt_start += iadev->tx_buf_sz;  
1976         }  
1977         iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1978         if (!iadev->tx_buf) {
1979             printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1980             goto err_free_dle;
1981         }
1982         for (i= 0; i< iadev->num_tx_desc; i++)
1983         {
1984             struct cpcs_trailer *cpcs;
1985  
1986             cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1987             if(!cpcs) {                
1988                 printk(KERN_ERR DEV_LABEL " couldn't get free page\n"); 
1989                 goto err_free_tx_bufs;
1990             }
1991             iadev->tx_buf[i].cpcs = cpcs;
1992             iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1993                 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1994         }
1995         iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1996                                    sizeof(struct desc_tbl_t), GFP_KERNEL);
1997         if (!iadev->desc_tbl) {
1998                 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1999                 goto err_free_all_tx_bufs;
2000         }
2001   
2002         /* Communication Queues base address */  
2003         i = TX_COMP_Q * iadev->memSize;
2004         writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);  
2005   
2006         /* Transmit Complete Queue */  
2007         writew(i, iadev->seg_reg+TCQ_ST_ADR);  
2008         writew(i, iadev->seg_reg+TCQ_RD_PTR);  
2009         writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); 
2010         iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
2011         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2012                                               iadev->seg_reg+TCQ_ED_ADR); 
2013         /* Fill the TCQ with all the free descriptors. */  
2014         tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);  
2015         tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);  
2016         for(i=1; i<=iadev->num_tx_desc; i++)  
2017         {  
2018                 *tcq_start = (u_short)i;  
2019                 tcq_start++;  
2020         }  
2021   
2022         /* Packet Ready Queue */  
2023         i = PKT_RDY_Q * iadev->memSize; 
2024         writew(i, iadev->seg_reg+PRQ_ST_ADR);  
2025         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2026                                               iadev->seg_reg+PRQ_ED_ADR);
2027         writew(i, iadev->seg_reg+PRQ_RD_PTR);  
2028         writew(i, iadev->seg_reg+PRQ_WR_PTR);  
2029          
2030         /* Load local copy of PRQ and TCQ ptrs */
2031         iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2032         iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2033         iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2034
2035         iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2036         iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2037         iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2038
2039         /* Just for safety initializing the queue to have desc 1 always */  
2040         /* Fill the PRQ with all the free descriptors. */  
2041         prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);  
2042         prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);  
2043         for(i=1; i<=iadev->num_tx_desc; i++)  
2044         {  
2045                 *prq_start = (u_short)0;        /* desc 1 in all entries */  
2046                 prq_start++;  
2047         }  
2048         /* CBR Table */  
2049         IF_INIT(printk("Start CBR Init\n");)
2050 #if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
2051         writew(0,iadev->seg_reg+CBR_PTR_BASE);
2052 #else /* Charlie's logic is wrong ? */
2053         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2054         IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2055         writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2056 #endif
2057
2058         IF_INIT(printk("value in register = 0x%x\n",
2059                                    readw(iadev->seg_reg+CBR_PTR_BASE));)
2060         tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2061         writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2062         IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2063                                         readw(iadev->seg_reg+CBR_TAB_BEG));)
2064         writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2065         tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2066         writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2067         IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
2068                iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2069         IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2070           readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2071           readw(iadev->seg_reg+CBR_TAB_END+1));)
2072
2073         /* Initialize the CBR Scheduling Table */
2074         memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize, 
2075                                                           0, iadev->num_vc*6); 
2076         iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2077         iadev->CbrEntryPt = 0;
2078         iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2079         iadev->NumEnabledCBR = 0;
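        /* Each of the num_vc*3 CBR schedule slots therefore represents
           Granularity = MAX_ATM_155 / CbrTotEntries cells/sec; presumably
           ia_cbr_setup() claims slots in multiples of this granularity,
           which ia_close() reverses as NumCbrEntry * Granularity. */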
2080
2081         /* UBR scheduling Table and wait queue */  
2082         /* initialize all bytes of UBR scheduler table and wait queue to 0   
2083                 - SCHEDSZ is 1K (# of entries).  
2084                 - UBR Table size is 4K  
2085                 - UBR wait queue is 4K  
2086            since the table and wait queues are contiguous, all the bytes   
2087            can be initialized by one memset.
2088         */  
2089         
2090         vcsize_sel = 0;
2091         i = 8*1024;
2092         while (i != iadev->num_vc) {
2093           i /= 2;
2094           vcsize_sel++;
2095         }
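        /* Same log2(8192 / num_vc) computation as in rx_init(); the result
           is OR'ed into the low bits of VCT_BASE just below. */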
2096  
2097         i = MAIN_VC_TABLE * iadev->memSize;
2098         writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2099         i =  EXT_VC_TABLE * iadev->memSize;
2100         writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2101         i = UBR_SCHED_TABLE * iadev->memSize;
2102         writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
2103         i = UBR_WAIT_Q * iadev->memSize; 
2104         writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
2105         memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2106                                                        0, iadev->num_vc*8);
2107         /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/  
2108         /* initialize all bytes of ABR scheduler table and wait queue to 0   
2109                 - SCHEDSZ is 1K (# of entries).  
2110                 - ABR Table size is 2K  
2111                 - ABR wait queue is 2K  
2112            since the table and wait queues are contiguous, all the bytes   
2113            can be initialized by one memset.
2114         */  
2115         i = ABR_SCHED_TABLE * iadev->memSize;
2116         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2117         i = ABR_WAIT_Q * iadev->memSize;
2118         writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2119  
2120         i = ABR_SCHED_TABLE*iadev->memSize;
2121         memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
2122         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
2123         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
2124         iadev->testTable = kmalloc(sizeof(struct testTable_t *) * iadev->num_vc, GFP_KERNEL); 
2125         if (!iadev->testTable) {
2126            printk("Get free page failed\n");
2127            goto err_free_desc_tbl;
2128         }
2129         for(i=0; i<iadev->num_vc; i++)  
2130         {  
2131                 memset((caddr_t)vc, 0, sizeof(*vc));  
2132                 memset((caddr_t)evc, 0, sizeof(*evc));  
2133                 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2134                                                 GFP_KERNEL);
2135                 if (!iadev->testTable[i])
2136                         goto err_free_test_tables;
2137                 iadev->testTable[i]->lastTime = 0;
2138                 iadev->testTable[i]->fract = 0;
2139                 iadev->testTable[i]->vc_status = VC_UBR;
2140                 vc++;  
2141                 evc++;  
2142         }  
2143   
2144         /* Other Initialization */  
2145           
2146         /* Max Rate Register */  
2147         if (iadev->phy_type & FE_25MBIT_PHY) {
2148            writew(RATE25, iadev->seg_reg+MAXRATE);  
2149            writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2150         }
2151         else {
2152            writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2153            writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2154         }
2155         /* Set Idle Header Registers to be sure */  
2156         writew(0, iadev->seg_reg+IDLEHEADHI);  
2157         writew(0, iadev->seg_reg+IDLEHEADLO);  
2158   
2159         /* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
2160         writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); 
2161
2162         iadev->close_pending = 0;
2163         init_waitqueue_head(&iadev->close_wait);
2164         init_waitqueue_head(&iadev->timeout_wait);
2165         skb_queue_head_init(&iadev->tx_dma_q);  
2166         ia_init_rtn_q(&iadev->tx_return_q);  
2167
2168         /* RM Cell Protocol ID and Message Type */  
2169         writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);  
2170         skb_queue_head_init (&iadev->tx_backlog);
2171   
2172         /* Mode Register 1 */  
2173         writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);  
2174   
2175         /* Mode Register 0 */  
2176         writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);  
2177   
2178         /* Interrupt Status Register - read to clear */  
2179         readw(iadev->seg_reg+SEG_INTR_STATUS_REG);  
2180   
2181         /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */  
2182         writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2183         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);  
2184         iadev->tx_pkt_cnt = 0;
2185         iadev->rate_limit = iadev->LineRate / 3;
2186   
2187         return 0;
2188
2189 err_free_test_tables:
2190         while (--i >= 0)
2191                 kfree(iadev->testTable[i]);
2192         kfree(iadev->testTable);
2193 err_free_desc_tbl:
2194         kfree(iadev->desc_tbl);
2195 err_free_all_tx_bufs:
2196         i = iadev->num_tx_desc;
2197 err_free_tx_bufs:
2198         while (--i >= 0) {
2199                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2200
2201                 pci_unmap_single(iadev->pci, desc->dma_addr,
2202                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2203                 kfree(desc->cpcs);
2204         }
2205         kfree(iadev->tx_buf);
2206 err_free_dle:
2207         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2208                             iadev->tx_dle_dma);  
2209 err_out:
2210         return -ENOMEM;
2211 }   
2212    
2213 static irqreturn_t ia_int(int irq, void *dev_id)  
2214 {  
2215    struct atm_dev *dev;  
2216    IADEV *iadev;  
2217    unsigned int status;  
2218    int handled = 0;
2219
2220    dev = dev_id;  
2221    iadev = INPH_IA_DEV(dev);  
2222    while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))  
2223    { 
2224         handled = 1;
2225         IF_EVENT(printk("ia_int: status = 0x%x\n", status);) 
2226         if (status & STAT_REASSINT)  
2227         {  
2228            /* do something */  
2229            IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);) 
2230            rx_intr(dev);  
2231         }  
2232         if (status & STAT_DLERINT)  
2233         {  
2234            /* Clear this bit by writing a 1 to it. */  
2235            writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2236            rx_dle_intr(dev);  
2237         }  
2238         if (status & STAT_SEGINT)  
2239         {  
2240            /* do something */ 
2241            IF_EVENT(printk("IA: tx_intr \n");) 
2242            tx_intr(dev);  
2243         }  
2244         if (status & STAT_DLETINT)  
2245         {  
2246            writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2247            tx_dle_intr(dev);  
2248         }  
2249         if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))  
2250         {  
2251            if (status & STAT_FEINT) 
2252                ia_frontend_intr(iadev);
2253         }  
2254    }
2255    return IRQ_RETVAL(handled);
2256 }  
2257           
2258           
2259           
2260 /*----------------------------- entries --------------------------------*/  
2261 static int get_esi(struct atm_dev *dev)  
2262 {  
2263         IADEV *iadev;  
2264         int i;  
2265         u32 mac1;  
2266         u16 mac2;  
2267           
2268         iadev = INPH_IA_DEV(dev);  
2269         mac1 = cpu_to_be32(le32_to_cpu(readl(  
2270                                 iadev->reg+IPHASE5575_MAC1)));  
2271         mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));  
2272         IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)  
2273         for (i=0; i<MAC1_LEN; i++)  
2274                 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));  
2275           
2276         for (i=0; i<MAC2_LEN; i++)  
2277                 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));  
2278         return 0;  
2279 }  
2280           
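/* reset_sar(): presumably the external reset disturbs PCI config space, so
   the routine below saves all 64 config dwords, writes the
   IPHASE5575_EXT_RESET register, restores the dwords and delays briefly
   (a sketch of the intent, inferred from the code itself). */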
2281 static int reset_sar(struct atm_dev *dev)  
2282 {  
2283         IADEV *iadev;  
2284         int i, error = 1;  
2285         unsigned int pci[64];  
2286           
2287         iadev = INPH_IA_DEV(dev);  
2288         for(i=0; i<64; i++)  
2289           if ((error = pci_read_config_dword(iadev->pci,  
2290                                 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
2291               return error;  
2292         writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
2293         for(i=0; i<64; i++)  
2294           if ((error = pci_write_config_dword(iadev->pci,  
2295                                         i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
2296             return error;  
2297         udelay(5);  
2298         return 0;  
2299 }  
2300           
2301           
2302 static int __devinit ia_init(struct atm_dev *dev)
2303 {  
2304         IADEV *iadev;  
2305         unsigned long real_base;
2306         void __iomem *base;
2307         unsigned short command;  
2308         int error, i; 
2309           
2310         /* The device has been identified and registered. Now we read   
2311            necessary configuration info like memory base address,   
2312            interrupt number etc */  
2313           
2314         IF_INIT(printk(">ia_init\n");)  
2315         dev->ci_range.vpi_bits = 0;  
2316         dev->ci_range.vci_bits = NR_VCI_LD;  
2317
2318         iadev = INPH_IA_DEV(dev);  
2319         real_base = pci_resource_start (iadev->pci, 0);
2320         iadev->irq = iadev->pci->irq;
2321                   
2322         error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
2323         if (error) {
2324                 printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",  
2325                                 dev->number,error);  
2326                 return -EINVAL;  
2327         }  
2328         IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",  
2329                         dev->number, iadev->pci->revision, real_base, iadev->irq);)
2330           
2331         /* find mapping size of board */  
2332           
2333         iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2334
2335         if (iadev->pci_map_size == 0x100000){
2336           iadev->num_vc = 4096;
2337           dev->ci_range.vci_bits = NR_VCI_4K_LD;  
2338           iadev->memSize = 4;
2339         }
2340         else if (iadev->pci_map_size == 0x40000) {
2341           iadev->num_vc = 1024;
2342           iadev->memSize = 1;
2343         }
2344         else {
2345            printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2346            return -EINVAL;
2347         }
2348         IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)  
2349           
2350         /* enable bus mastering */
2351         pci_set_master(iadev->pci);
2352
2353         /*  
2354          * Delay at least 1us before doing any mem accesses (how 'bout 10?)  
2355          */  
2356         udelay(10);  
2357           
2358         /* mapping the physical address to a virtual address in address space */  
2359         base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */  
2360           
2361         if (!base)  
2362         {  
2363                 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
2364                             dev->number);  
2365                 return -ENOMEM;
2366         }  
2367         IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",  
2368                         dev->number, iadev->pci->revision, base, iadev->irq);)
2369           
2370         /* filling the iphase dev structure */  
2371         iadev->mem = iadev->pci_map_size /2;  
2372         iadev->real_base = real_base;  
2373         iadev->base = base;  
2374                   
2375         /* Bus Interface Control Registers */  
2376         iadev->reg = base + REG_BASE;
2377         /* Segmentation Control Registers */  
2378         iadev->seg_reg = base + SEG_BASE;
2379         /* Reassembly Control Registers */  
2380         iadev->reass_reg = base + REASS_BASE;  
2381         /* Front end/ DMA control registers */  
2382         iadev->phy = base + PHY_BASE;  
2383         iadev->dma = base + PHY_BASE;  
2384         /* RAM - Segmentation RAM and Reassembly RAM */  
2385         iadev->ram = base + ACTUAL_RAM_BASE;  
2386         iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;  
2387         iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;  
2388   
2389         /* lets print out the above */  
2390         IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n", 
2391           iadev->reg,iadev->seg_reg,iadev->reass_reg, 
2392           iadev->phy, iadev->ram, iadev->seg_ram, 
2393           iadev->reass_ram);) 
2394           
2395         /* lets try reading the MAC address */  
2396         error = get_esi(dev);  
2397         if (error) {
2398           iounmap(iadev->base);
2399           return error;  
2400         }
2401         printk("IA: ");
2402         for (i=0; i < ESI_LEN; i++)  
2403                 printk("%s%02X",i ? "-" : "",dev->esi[i]);  
2404         printk("\n");  
2405   
2406         /* reset SAR */  
2407         if (reset_sar(dev)) {
2408            iounmap(iadev->base);
2409            printk("IA: reset SAR failed, please try again\n");
2410            return 1;
2411         }
2412         return 0;  
2413 }  
2414
2415 static void ia_update_stats(IADEV *iadev) {
2416     if (!iadev->carrier_detect)
2417         return;
2418     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2419     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2420     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2421     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2422     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2423     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2424     return;
2425 }
2426   
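/* ia_led_timer(): re-arms itself every HZ/4 (see mod_timer below) and
   toggles the CTRL_LED bit for each registered adapter; one half of the
   blink refreshes the statistics counters, the other piggybacks a tx poll
   and wakes any closer sleeping on close_wait. */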
2427 static void ia_led_timer(unsigned long arg) {
2428         unsigned long flags;
2429         static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2430         u_char i;
2431         static u32 ctrl_reg; 
2432         for (i = 0; i < iadev_count; i++) {
2433            if (ia_dev[i]) {
2434               ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2435               if (blinking[i] == 0) {
2436                  blinking[i]++;
2437                  ctrl_reg &= (~CTRL_LED);
2438                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2439                  ia_update_stats(ia_dev[i]);
2440               }
2441               else {
2442                  blinking[i] = 0;
2443                  ctrl_reg |= CTRL_LED;
2444                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2445                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2446                  if (ia_dev[i]->close_pending)  
2447                     wake_up(&ia_dev[i]->close_wait);
2448                  ia_tx_poll(ia_dev[i]);
2449                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2450               }
2451            }
2452         }
2453         mod_timer(&ia_timer, jiffies + HZ / 4);
2454         return;
2455 }
2456
2457 static void ia_phy_put(struct atm_dev *dev, unsigned char value,   
2458         unsigned long addr)  
2459 {  
2460         writel(value, INPH_IA_DEV(dev)->phy+addr);  
2461 }  
2462   
2463 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)  
2464 {  
2465         return readl(INPH_IA_DEV(dev)->phy+addr);  
2466 }  
2467
2468 static void ia_free_tx(IADEV *iadev)
2469 {
2470         int i;
2471
2472         kfree(iadev->desc_tbl);
2473         for (i = 0; i < iadev->num_vc; i++)
2474                 kfree(iadev->testTable[i]);
2475         kfree(iadev->testTable);
2476         for (i = 0; i < iadev->num_tx_desc; i++) {
2477                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2478
2479                 pci_unmap_single(iadev->pci, desc->dma_addr,
2480                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2481                 kfree(desc->cpcs);
2482         }
2483         kfree(iadev->tx_buf);
2484         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2485                             iadev->tx_dle_dma);  
2486 }
2487
2488 static void ia_free_rx(IADEV *iadev)
2489 {
2490         kfree(iadev->rx_open);
2491         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2492                           iadev->rx_dle_dma);  
2493 }
2494
2495 static int __devinit ia_start(struct atm_dev *dev)
2496 {  
2497         IADEV *iadev;  
2498         int error;  
2499         unsigned char phy;  
2500         u32 ctrl_reg;  
2501         IF_EVENT(printk(">ia_start\n");)  
2502         iadev = INPH_IA_DEV(dev);  
2503         if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
2504                 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",  
2505                     dev->number, iadev->irq);  
2506                 error = -EAGAIN;
2507                 goto err_out;
2508         }  
2509         /* @@@ should release IRQ on error */  
2510         /* enabling memory + master */  
2511         if ((error = pci_write_config_word(iadev->pci,   
2512                                 PCI_COMMAND,   
2513                                 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))   
2514         {  
2515                 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"  
2516                     "master (0x%x)\n",dev->number, error);  
2517                 error = -EIO;  
2518                 goto err_free_irq;
2519         }  
2520         udelay(10);  
2521   
2522         /* Maybe we should reset the front end, initialize Bus Interface Control   
2523                 Registers and see. */  
2524   
2525         IF_INIT(printk("Bus ctrl reg: %08x\n", 
2526                             readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2527         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2528         ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))  
2529                         | CTRL_B8  
2530                         | CTRL_B16  
2531                         | CTRL_B32  
2532                         | CTRL_B48  
2533                         | CTRL_B64  
2534                         | CTRL_B128  
2535                         | CTRL_ERRMASK  
2536                         | CTRL_DLETMASK         /* should be removed later */  
2537                         | CTRL_DLERMASK  
2538                         | CTRL_SEGMASK  
2539                         | CTRL_REASSMASK          
2540                         | CTRL_FEMASK  
2541                         | CTRL_CSPREEMPT;  
2542   
2543        writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2544   
2545         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2546                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));  
2547            printk("Bus status reg after init: %08x\n", 
2548                             readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)  
2549     
2550         ia_hw_type(iadev); 
2551         error = tx_init(dev);  
2552         if (error)
2553                 goto err_free_irq;
2554         error = rx_init(dev);  
2555         if (error)
2556                 goto err_free_tx;
2557   
2558         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2559         writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2560         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2561                                readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2562         phy = 0; /* resolve compiler complaint */
2563         IF_INIT ( 
2564         if ((phy=ia_phy_get(dev,0)) == 0x30)  
2565                 printk("IA: pm5346,rev.%d\n",phy&0x0f);  
2566         else  
2567                 printk("IA: utopia,rev.%0x\n",phy);) 
2568
2569         if (iadev->phy_type &  FE_25MBIT_PHY)
2570            ia_mb25_init(iadev);
2571         else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2572            ia_suni_pm7345_init(iadev);
2573         else {
2574                 error = suni_init(dev);
2575                 if (error)
2576                         goto err_free_rx;
2577                 if (dev->phy->start) {
2578                         error = dev->phy->start(dev);
2579                         if (error)
2580                                 goto err_free_rx;
2581                 }
2582                 /* Get iadev->carrier_detect status */
2583                 ia_frontend_intr(iadev);
2584         }
2585         return 0;
2586
2587 err_free_rx:
2588         ia_free_rx(iadev);
2589 err_free_tx:
2590         ia_free_tx(iadev);
2591 err_free_irq:
2592         free_irq(iadev->irq, dev);  
2593 err_out:
2594         return error;
2595 }  
2596   
2597 static void ia_close(struct atm_vcc *vcc)  
2598 {
2599         DEFINE_WAIT(wait);
2600         u16 *vc_table;
2601         IADEV *iadev;
2602         struct ia_vcc *ia_vcc;
2603         struct sk_buff *skb = NULL;
2604         struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2605         unsigned long closetime, flags;
2606
2607         iadev = INPH_IA_DEV(vcc->dev);
2608         ia_vcc = INPH_IA_VCC(vcc);
2609         if (!ia_vcc) return;  
2610
2611         IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n", 
2612                                               ia_vcc->vc_desc_cnt,vcc->vci);)
2613         clear_bit(ATM_VF_READY,&vcc->flags);
2614         skb_queue_head_init (&tmp_tx_backlog);
2615         skb_queue_head_init (&tmp_vcc_backlog); 
2616         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2617            iadev->close_pending++;
2618            prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2619            schedule_timeout(50);
2620            finish_wait(&iadev->timeout_wait, &wait);
2621            spin_lock_irqsave(&iadev->tx_lock, flags); 
2622            while((skb = skb_dequeue(&iadev->tx_backlog))) {
2623               if (ATM_SKB(skb)->vcc == vcc){ 
2624                  if (vcc->pop) vcc->pop(vcc, skb);
2625                  else dev_kfree_skb_any(skb);
2626               }
2627               else 
2628                  skb_queue_tail(&tmp_tx_backlog, skb);
2629            } 
2630            while((skb = skb_dequeue(&tmp_tx_backlog))) 
2631              skb_queue_tail(&iadev->tx_backlog, skb);
2632            IF_EVENT(printk("IA TX Done desc_cnt = %d\n", ia_vcc->vc_desc_cnt);) 
2633            closetime = 300000 / ia_vcc->pcr;
2634            if (closetime == 0)
2635               closetime = 1;
2636            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2637            wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2638            spin_lock_irqsave(&iadev->tx_lock, flags);
2639            iadev->close_pending--;
2640            iadev->testTable[vcc->vci]->lastTime = 0;
2641            iadev->testTable[vcc->vci]->fract = 0; 
2642            iadev->testTable[vcc->vci]->vc_status = VC_UBR; 
2643            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2644               if (vcc->qos.txtp.min_pcr > 0)
2645                  iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2646            }
2647            if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2648               ia_vcc = INPH_IA_VCC(vcc); 
2649               iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2650               ia_cbrVc_close (vcc);
2651            }
2652            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2653         }
2654         
2655         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {   
2656            // reset reass table
2657            vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2658            vc_table += vcc->vci; 
2659            *vc_table = NO_AAL5_PKT;
2660            // reset vc table
2661            vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2662            vc_table += vcc->vci;
2663            *vc_table = (vcc->vci << 6) | 15;
2664            if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2665               struct abr_vc_table __iomem *abr_vc_table = 
2666                                 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2667               abr_vc_table +=  vcc->vci;
2668               abr_vc_table->rdf = 0x0003;
2669               abr_vc_table->air = 0x5eb1;
2670            }                                 
2671            // Drain the packets
2672            rx_dle_intr(vcc->dev); 
2673            iadev->rx_open[vcc->vci] = NULL;
2674         }
2675         kfree(INPH_IA_VCC(vcc));  
2676         ia_vcc = NULL;
2677         vcc->dev_data = NULL;
2678         clear_bit(ATM_VF_ADDR,&vcc->flags);
2679         return;        
2680 }  
2681   
2682 static int ia_open(struct atm_vcc *vcc)
2683 {  
2684         struct ia_vcc *ia_vcc;  
2685         int error;  
2686         if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))  
2687         {  
2688                 IF_EVENT(printk("ia: not partially allocated resources\n");)  
2689                 vcc->dev_data = NULL;
2690         }  
2691         if (vcc->vci != ATM_VCI_UNSPEC && vcc->vpi != ATM_VPI_UNSPEC)  
2692         {  
2693                 IF_EVENT(printk("iphase open: unspec part\n");)  
2694                 set_bit(ATM_VF_ADDR,&vcc->flags);
2695         }  
2696         if (vcc->qos.aal != ATM_AAL5)  
2697                 return -EINVAL;  
2698         IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n", 
2699                                  vcc->dev->number, vcc->vpi, vcc->vci);)  
2700   
2701         /* Device dependent initialization */  
2702         ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);  
2703         if (!ia_vcc) return -ENOMEM;  
2704         vcc->dev_data = ia_vcc;
2705   
2706         if ((error = open_rx(vcc)))  
2707         {  
2708                 IF_EVENT(printk("iadev: error in open_rx, closing\n");)  
2709                 ia_close(vcc);  
2710                 return error;  
2711         }  
2712   
2713         if ((error = open_tx(vcc)))  
2714         {  
2715                 IF_EVENT(printk("iadev: error in open_tx, closing\n");)  
2716                 ia_close(vcc);  
2717                 return error;  
2718         }  
2719   
2720         set_bit(ATM_VF_READY,&vcc->flags);
2721
2722 #if 0
2723         {
2724            static u8 first = 1; 
2725            if (first) {
2726               ia_timer.expires = jiffies + 3*HZ;
2727               add_timer(&ia_timer);
2728               first = 0;
2729            }           
2730         }
2731 #endif
2732         IF_EVENT(printk("ia open returning\n");)  
2733         return 0;  
2734 }  
2735   
2736 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)  
2737 {  
2738         IF_EVENT(printk(">ia_change_qos\n");)  
2739         return 0;  
2740 }  
2741   
2742 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)  
2743 {  
2744    IA_CMDBUF ia_cmds;
2745    IADEV *iadev;
2746    int i, board;
2747    u16 __user *tmps;
2748    IF_EVENT(printk(">ia_ioctl\n");)  
2749    if (cmd != IA_CMD) {
2750       if (!dev->phy->ioctl) return -EINVAL;
2751       return dev->phy->ioctl(dev,cmd,arg);
2752    }
2753    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
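   /* on input, the status field of the command buffer selects the board */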
2754    board = ia_cmds.status;
2755    if ((board < 0) || (board >= iadev_count))
2756          board = 0;    
2757    iadev = ia_dev[board];
2758    switch (ia_cmds.cmd) {
2759    case MEMDUMP:
2760    {
2761         switch (ia_cmds.sub_cmd) {
2762           case MEMDUMP_DEV:     
2763              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2764              if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2765                 return -EFAULT;
2766              ia_cmds.status = 0;
2767              break;
2768           case MEMDUMP_SEGREG:
2769              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2770              tmps = (u16 __user *)ia_cmds.buf;
2771              for(i=0; i<0x80; i+=2, tmps++)
2772                 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2773              ia_cmds.status = 0;
2774              ia_cmds.len = 0x80;
2775              break;
2776           case MEMDUMP_REASSREG:
2777              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2778              tmps = (u16 __user *)ia_cmds.buf;
2779              for(i=0; i<0x80; i+=2, tmps++)
2780                 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2781              ia_cmds.status = 0;
2782              ia_cmds.len = 0x80;
2783              break;
2784           case MEMDUMP_FFL:
2785           {  
2786              ia_regs_t       *regs_local;
2787              ffredn_t        *ffL;
2788              rfredn_t        *rfL;
2789                      
2790              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2791              regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2792              if (!regs_local) return -ENOMEM;
2793              ffL = &regs_local->ffredn;
2794              rfL = &regs_local->rfredn;
2795              /* Copy real rfred registers into the local copy */
2796              for (i=0; i<(sizeof (rfredn_t))/4; i++)
2797                 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2798              /* Copy real ffred registers into the local copy */
2799              for (i=0; i<(sizeof (ffredn_t))/4; i++)
2800                 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2801
2802              if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2803                 kfree(regs_local);
2804                 return -EFAULT;
2805              }
2806              kfree(regs_local);
2807              printk("Board %d registers dumped\n", board);
2808              ia_cmds.status = 0;                  
2809          }      
2810              break;        
2811          case READ_REG:
2812          {  
2813              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2814              desc_dbg(iadev); 
2815              ia_cmds.status = 0; 
2816          }
2817              break;
2818          case 0x6:
2819          {  
2820              ia_cmds.status = 0; 
2821              printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2822              printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2823          }
2824              break;
2825          case 0x8:
2826          {
2827              struct k_sonet_stats *stats;
2828              stats = &PRIV(_ia_dev[board])->sonet_stats;
2829              printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2830              printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2831              printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2832              printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2833              printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2834              printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2835              printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2836              printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2837              printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2838          }
2839             ia_cmds.status = 0;
2840             break;
2841          case 0x9:
2842             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2843             for (i = 1; i <= iadev->num_rx_desc; i++)
2844                free_desc(_ia_dev[board], i);
2845             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD), 
2846                                             iadev->reass_reg+REASS_MASK_REG);
2847             iadev->rxing = 1;
2848             
2849             ia_cmds.status = 0;
2850             break;
2851
2852          case 0xb:
2853             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2854             ia_frontend_intr(iadev);
2855             break;
2856          case 0xa:
2857          {  
2858              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2859              ia_cmds.status = 0; 
2860              IADebugFlag = ia_cmds.maddr;
2861              printk("New debug option loaded\n");
2862          }
2863              break;
2864          default:
2865              ia_cmds.status = 0;
2866              break;
2867       } 
2868    }
2869       break;
2870    default:
2871       break;
2872
2873    }    
2874    return 0;  
2875 }  
2876   
2877 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,   
2878         void __user *optval, int optlen)  
2879 {  
2880         IF_EVENT(printk(">ia_getsockopt\n");)  
2881         return -EINVAL;  
2882 }  
2883   
2884 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,   
2885         void __user *optval, unsigned int optlen)  
2886 {  
2887         IF_EVENT(printk(">ia_setsockopt\n");)  
2888         return -EINVAL;  
2889 }  
2890   
2891 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2892         IADEV *iadev;
2893         struct dle *wr_ptr;
2894         struct tx_buf_desc __iomem *buf_desc_ptr;
2895         int desc;
2896         int comp_code;
2897         int total_len;
2898         struct cpcs_trailer *trailer;
2899         struct ia_vcc *iavcc;
2900
2901         iadev = INPH_IA_DEV(vcc->dev);  
2902         iavcc = INPH_IA_VCC(vcc);
2903         if (!iavcc->txing) {
2904            printk("discard packet on closed VC\n");
2905            if (vcc->pop)
2906                 vcc->pop(vcc, skb);
2907            else
2908                 dev_kfree_skb_any(skb);
2909            return 0;
2910         }
2911
2912         if (skb->len > iadev->tx_buf_sz - 8) {
2913            printk("Transmit size over tx buffer size\n");
2914            if (vcc->pop)
2915                  vcc->pop(vcc, skb);
2916            else
2917                  dev_kfree_skb_any(skb);
2918           return 0;
2919         }
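        /* the check below rejects skbs whose data pointer is not 32-bit
           aligned (presumably a requirement of the segmentation DMA engine) */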
2920         if ((unsigned long)skb->data & 3) {
2921            printk("Misaligned SKB\n");
2922            if (vcc->pop)
2923                  vcc->pop(vcc, skb);
2924            else
2925                  dev_kfree_skb_any(skb);
2926            return 0;
2927         }       
2928         /* Get a descriptor number from our free descriptor queue.  
2929            The descriptor number comes from the TCQ, which doubles as a  
2930            free-buffer queue: the TCQ is initially populated with all  
2931            descriptors and is therefore full.  
2932         */
2933         desc = get_desc (iadev, iavcc);
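        /* get_desc() returns 0xffff when the TCQ has no free descriptor;
           the non-zero return below tells the caller (e.g. ia_send()) to
           keep the skb on the tx backlog */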
2934         if (desc == 0xffff) 
2935             return 1;
2936         comp_code = desc >> 13;  
2937         desc &= 0x1fff;  
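        /* the 16-bit TCQ entry just consumed packed a 3-bit completion code
           in its top bits and the 13-bit descriptor number in the rest */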
2938   
2939         if ((desc == 0) || (desc > iadev->num_tx_desc))  
2940         {  
2941                 IF_ERR(printk(DEV_LABEL " invalid desc for send: %d\n", desc);) 
2942                 atomic_inc(&vcc->stats->tx);
2943                 if (vcc->pop)   
2944                     vcc->pop(vcc, skb);   
2945                 else  
2946                     dev_kfree_skb_any(skb);
2947                 return 0;   /* return SUCCESS */
2948         }  
2949   
2950         if (comp_code)  
2951         {  
2952             IF_ERR(printk(DEV_LABEL " send desc:%d completion code %d error\n", 
2953                                                             desc, comp_code);)  
2954         }  
2955        
2956         /* remember the desc and vcc mapping */
2957         iavcc->vc_desc_cnt++;
2958         iadev->desc_tbl[desc-1].iavcc = iavcc;
2959         iadev->desc_tbl[desc-1].txskb = skb;
2960         IA_SKB_STATE(skb) = 0;
2961
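        /* consume the TCQ entry: advance the read pointer, wrapping from
           tcq_ed back to tcq_st, and publish the new value to the chip */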
2962         iadev->ffL.tcq_rd += 2;
2963         if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2964                 iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2965         writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2966   
2967         /* Put the descriptor number in the packet ready queue  
2968                 and put the updated write pointer in the DLE field   
2969         */   
2970         *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc; 
2971
2972         iadev->ffL.prq_wr += 2;
2973         if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2974                 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2975           
2976         /* Compute the total length (payload plus CPCS trailer), padded 
2977            up to a multiple of 48 bytes (one ATM cell payload).  */
2978         total_len = skb->len + sizeof(struct cpcs_trailer);  
2979         total_len = ((total_len + 47) / 48) * 48;
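        /* e.g. a 100-byte skb plus the 8-byte AAL5 trailer gives 108 bytes,
           padded up to 144 (the next multiple of 48) */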
2980         IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)  
2981  
2982         /* Put the packet in a tx buffer */   
2983         trailer = iadev->tx_buf[desc-1].cpcs;
2984         IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2985                   skb, skb->data, skb->len, desc);)
2986         trailer->control = 0; 
2987         /*big endian*/ 
2988         trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2989         trailer->crc32 = 0;     /* not needed - dummy bytes */  
2990
2991         /* Display the packet */  
2992         IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n", 
2993                                                         skb->len, tcnter++);  
2994         xdump(skb->data, skb->len, "TX: ");
2995         printk("\n");)
2996
2997         /* Build the buffer descriptor */  
2998         buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2999         buf_desc_ptr += desc;   /* points to the corresponding entry */  
3000         buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;   
3001         /* Huh ? p.115 of users guide describes this as a read-only register */
3002         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
3003         buf_desc_ptr->vc_index = vcc->vci;
3004         buf_desc_ptr->bytes = total_len;  
3005
3006         if (vcc->qos.txtp.traffic_class == ATM_ABR)  
3007            clear_lockup (vcc, iadev);
3008
3009         /* Build the DLE structure */  
3010         wr_ptr = iadev->tx_dle_q.write;  
3011         memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));  
3012         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
3013                 skb->len, PCI_DMA_TODEVICE);
3014         wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
3015                                                   buf_desc_ptr->buf_start_lo;  
3016         /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
3017         wr_ptr->bytes = skb->len;  
3018
3019         /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3020         if ((wr_ptr->bytes >> 2) == 0xb)
3021            wr_ptr->bytes = 0x30;
3022
3023         wr_ptr->mode = TX_DLE_PSI; 
3024         wr_ptr->prq_wr_ptr_data = 0;
3025   
3026         /* end is not to be used for the DLE q */  
3027         if (++wr_ptr == iadev->tx_dle_q.end)  
3028                 wr_ptr = iadev->tx_dle_q.start;  
3029         
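        /* a second DLE follows for the CPCS trailer; only that one sets
           DMA_INT_ENABLE, so the DMA-complete interrupt is raised only
           after the whole PDU has been copied */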
3030         /* Build trailer dle */
3031         wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3032         wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) | 
3033           buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3034
3035         wr_ptr->bytes = sizeof(struct cpcs_trailer);
3036         wr_ptr->mode = DMA_INT_ENABLE; 
3037         wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3038         
3039         /* end is not to be used for the DLE q */
3040         if (++wr_ptr == iadev->tx_dle_q.end)  
3041                 wr_ptr = iadev->tx_dle_q.start;
3042
3043         iadev->tx_dle_q.write = wr_ptr;  
3044         ATM_DESC(skb) = vcc->vci;
3045         skb_queue_tail(&iadev->tx_dma_q, skb);
3046
3047         atomic_inc(&vcc->stats->tx);
3048         iadev->tx_pkt_cnt++;
3049         /* Increment transaction counter */  
3050         writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
3051         
3052 #if 0        
3053         /* add flow control logic */ 
3054         if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3055           if (iavcc->vc_desc_cnt > 10) {
3056              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3057             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3058               iavcc->flow_inc = -1;
3059               iavcc->saved_tx_quota = vcc->tx_quota;
3060            } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3061              // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3062              printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); 
3063               iavcc->flow_inc = 0;
3064            }
3065         }
3066 #endif
3067         IF_TX(printk("ia send done\n");)  
3068         return 0;  
3069 }  
3070
3071 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3072 {
3073         IADEV *iadev; 
3074         unsigned long flags;
3075
3076         iadev = INPH_IA_DEV(vcc->dev);
3077         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3078         {
3079             if (!skb)
3080                 printk(KERN_CRIT "null skb in ia_send\n");
3081             else dev_kfree_skb_any(skb);
3082             return -EINVAL;
3083         }                         
3084         spin_lock_irqsave(&iadev->tx_lock, flags); 
3085         if (!test_bit(ATM_VF_READY,&vcc->flags)){ 
3086             dev_kfree_skb_any(skb);
3087             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3088             return -EINVAL; 
3089         }
3090         ATM_SKB(skb)->vcc = vcc;
3091  
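        /* keep ordering: if a backlog already exists, queue behind it;
           otherwise try to transmit now and fall back to the backlog if
           ia_pkt_tx() cannot get a free descriptor */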
3092         if (skb_peek(&iadev->tx_backlog)) {
3093            skb_queue_tail(&iadev->tx_backlog, skb);
3094         }
3095         else {
3096            if (ia_pkt_tx (vcc, skb)) {
3097               skb_queue_tail(&iadev->tx_backlog, skb);
3098            }
3099         }
3100         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3101         return 0;
3102
3103 }
3104
3105 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3106 {
3107   int   left = *pos, n;   
3108   char  *tmpPtr;
3109   IADEV *iadev = INPH_IA_DEV(dev);
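  /* positional /proc read: *pos 0 emits the board description line,
     *pos 1 the counter block, and anything beyond ends the output */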
3110   if(!left--) {
3111      if (iadev->phy_type == FE_25MBIT_PHY) {
3112        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3113        return n;
3114      }
3115      if (iadev->phy_type == FE_DS3_PHY)
3116         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3117      else if (iadev->phy_type == FE_E3_PHY)
3118         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3119      else if (iadev->phy_type == FE_UTP_OPTION)
3120          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155"); 
3121      else
3122         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3123      tmpPtr = page + n;
3124      if (iadev->pci_map_size == 0x40000)
3125         n += sprintf(tmpPtr, "-1KVC-");
3126      else
3127         n += sprintf(tmpPtr, "-4KVC-");  
3128      tmpPtr = page + n; 
3129      if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3130         n += sprintf(tmpPtr, "1M  \n");
3131      else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3132         n += sprintf(tmpPtr, "512K\n");
3133      else
3134        n += sprintf(tmpPtr, "128K\n");
3135      return n;
3136   }
3137   if (!left) {
3138      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3139                            "  Size of Tx Buffer  :  %u\n"
3140                            "  Number of Rx Buffer:  %u\n"
3141                            "  Size of Rx Buffer  :  %u\n"
3142                            "  Packets Received   :  %u\n"
3143                            "  Packets Transmitted:  %u\n"
3144                            "  Cells Received     :  %u\n"
3145                            "  Cells Transmitted  :  %u\n"
3146                            "  Board Dropped Cells:  %u\n"
3147                            "  Board Dropped Pkts :  %u\n",
3148                            iadev->num_tx_desc,  iadev->tx_buf_sz,
3149                            iadev->num_rx_desc,  iadev->rx_buf_sz,
3150                            iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3151                            iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3152                            iadev->drop_rxcell, iadev->drop_rxpkt);                        
3153   }
3154   return 0;
3155 }
3156   
3157 static const struct atmdev_ops ops = {  
3158         .open           = ia_open,  
3159         .close          = ia_close,  
3160         .ioctl          = ia_ioctl,  
3161         .getsockopt     = ia_getsockopt,  
3162         .setsockopt     = ia_setsockopt,  
3163         .send           = ia_send,  
3164         .phy_put        = ia_phy_put,  
3165         .phy_get        = ia_phy_get,  
3166         .change_qos     = ia_change_qos,  
3167         .proc_read      = ia_proc_read,
3168         .owner          = THIS_MODULE,
3169 };  
3170           
3171 static int __devinit ia_init_one(struct pci_dev *pdev,
3172                                  const struct pci_device_id *ent)
3173 {  
3174         struct atm_dev *dev;  
3175         IADEV *iadev;  
3176         int ret;
3177
3178         iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3179         if (!iadev) {
3180                 ret = -ENOMEM;
3181                 goto err_out;
3182         }
3183
3184         iadev->pci = pdev;
3185
3186         IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3187                 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3188         if (pci_enable_device(pdev)) {
3189                 ret = -ENODEV;
3190                 goto err_out_free_iadev;
3191         }
3192         dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
3193         if (!dev) {
3194                 ret = -ENOMEM;
3195                 goto err_out_disable_dev;
3196         }
3197         dev->dev_data = iadev;
3198         IF_INIT(printk(DEV_LABEL " registered at (itf: %d)\n", dev->number);)
3199         IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
3200                 iadev->LineRate);)
3201
3202         pci_set_drvdata(pdev, dev);
3203
3204         ia_dev[iadev_count] = iadev;
3205         _ia_dev[iadev_count] = dev;
3206         iadev_count++;
3207         if (ia_init(dev) || ia_start(dev)) {  
3208                 IF_INIT(printk("IA register failed!\n");)
3209                 iadev_count--;
3210                 ia_dev[iadev_count] = NULL;
3211                 _ia_dev[iadev_count] = NULL;
3212                 ret = -EINVAL;
3213                 goto err_out_deregister_dev;
3214         }
3215         IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3216
3217         iadev->next_board = ia_boards;  
3218         ia_boards = dev;  
3219
3220         return 0;
3221
3222 err_out_deregister_dev:
3223         atm_dev_deregister(dev);  
3224 err_out_disable_dev:
3225         pci_disable_device(pdev);
3226 err_out_free_iadev:
3227         kfree(iadev);
3228 err_out:
3229         return ret;
3230 }
3231
3232 static void __devexit ia_remove_one(struct pci_dev *pdev)
3233 {
3234         struct atm_dev *dev = pci_get_drvdata(pdev);
3235         IADEV *iadev = INPH_IA_DEV(dev);
3236
3237         /* Disable phy interrupts */
3238         ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
3239                                    SUNI_RSOP_CIE);
3240         udelay(1);
3241
3242         if (dev->phy && dev->phy->stop)
3243                 dev->phy->stop(dev);
3244
3245         /* De-register device */  
3246         free_irq(iadev->irq, dev);
3247         iadev_count--;
3248         ia_dev[iadev_count] = NULL;
3249         _ia_dev[iadev_count] = NULL;
3250         IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
3251         atm_dev_deregister(dev);
3252
3253         iounmap(iadev->base);  
3254         pci_disable_device(pdev);
3255
3256         ia_free_rx(iadev);
3257         ia_free_tx(iadev);
3258
3259         kfree(iadev);
3260 }
3261
3262 static struct pci_device_id ia_pci_tbl[] = {
3263         { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3264         { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3265         { 0,}
3266 };
3267 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3268
3269 static struct pci_driver ia_driver = {
3270         .name =         DEV_LABEL,
3271         .id_table =     ia_pci_tbl,
3272         .probe =        ia_init_one,
3273         .remove =       __devexit_p(ia_remove_one),
3274 };
3275
3276 static int __init ia_module_init(void)
3277 {
3278         int ret;
3279
3280         ret = pci_register_driver(&ia_driver);
3281         if (ret >= 0) {
3282                 ia_timer.expires = jiffies + 3*HZ;
3283                 add_timer(&ia_timer); 
3284         } else
3285                 printk(KERN_ERR DEV_LABEL ": no adapter found\n");  
3286         return ret;
3287 }
3288
3289 static void __exit ia_module_exit(void)
3290 {
3291         pci_unregister_driver(&ia_driver);
3292
3293         del_timer(&ia_timer);
3294 }
3295
3296 module_init(ia_module_init);
3297 module_exit(ia_module_exit);