/*
 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
 * Copyright (c) 2008 Marvell Semiconductor
 *
 * Copyright (c) 2015 CMC Electronics, Inc.
 *	Added support for VLAN Table Unit operations
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
14 #include <linux/delay.h>
15 #include <linux/etherdevice.h>
16 #include <linux/ethtool.h>
17 #include <linux/if_bridge.h>
18 #include <linux/jiffies.h>
19 #include <linux/list.h>
20 #include <linux/module.h>
21 #include <linux/netdevice.h>
22 #include <linux/phy.h>
24 #include <net/switchdev.h>
25 #include "mv88e6xxx.h"
/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
 * will be directly accessible on some {device address,register address}
 * pair.  If the ADDR[4:0] pins are not strapped to zero, the switch
 * will only respond to SMI transactions to that specific address, and
 * an indirect addressing mechanism needs to be used to access its
 * registers.
 */
35 static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
40 for (i = 0; i < 16; i++) {
41 ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD);
45 if ((ret & SMI_CMD_BUSY) == 0)
52 int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
57 return mdiobus_read_nested(bus, addr, reg);
59 /* Wait for the bus to become free. */
60 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
64 /* Transmit the read command. */
65 ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
66 SMI_CMD_OP_22_READ | (addr << 5) | reg);
70 /* Wait for the read command to complete. */
71 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
76 ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA);
83 /* Must be called with SMI mutex held */
84 static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
86 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
92 ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
96 dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
102 int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
104 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
107 mutex_lock(&ps->smi_mutex);
108 ret = _mv88e6xxx_reg_read(ds, addr, reg);
109 mutex_unlock(&ps->smi_mutex);
114 int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
120 return mdiobus_write_nested(bus, addr, reg, val);
122 /* Wait for the bus to become free. */
123 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
127 /* Transmit the data to write. */
128 ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val);
132 /* Transmit the write command. */
133 ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
134 SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
138 /* Wait for the write command to complete. */
139 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
146 /* Must be called with SMI mutex held */
147 static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
150 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
155 dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
158 return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
161 int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
163 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
166 mutex_lock(&ps->smi_mutex);
167 ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
168 mutex_unlock(&ps->smi_mutex);
173 int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
175 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
176 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
177 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
182 int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
187 for (i = 0; i < 6; i++) {
190 /* Write the MAC address byte. */
191 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
192 GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
194 /* Wait for the write to complete. */
195 for (j = 0; j < 16; j++) {
196 ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
197 if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
{
	if (addr >= 0)
		return _mv88e6xxx_reg_read(ds, addr, regnum);
	return 0xffff;
}
215 /* Must be called with SMI mutex held */
216 static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
220 return _mv88e6xxx_reg_write(ds, addr, regnum, val);
224 #ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
225 static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
228 unsigned long timeout;
230 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
231 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
232 ret & ~GLOBAL_CONTROL_PPU_ENABLE);
234 timeout = jiffies + 1 * HZ;
235 while (time_before(jiffies, timeout)) {
236 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
237 usleep_range(1000, 2000);
238 if ((ret & GLOBAL_STATUS_PPU_MASK) !=
239 GLOBAL_STATUS_PPU_POLLING)
246 static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
249 unsigned long timeout;
251 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
252 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
254 timeout = jiffies + 1 * HZ;
255 while (time_before(jiffies, timeout)) {
256 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
257 usleep_range(1000, 2000);
258 if ((ret & GLOBAL_STATUS_PPU_MASK) ==
259 GLOBAL_STATUS_PPU_POLLING)
266 static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
268 struct mv88e6xxx_priv_state *ps;
270 ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
271 if (mutex_trylock(&ps->ppu_mutex)) {
272 struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
274 if (mv88e6xxx_ppu_enable(ds) == 0)
275 ps->ppu_disabled = 0;
276 mutex_unlock(&ps->ppu_mutex);
280 static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
282 struct mv88e6xxx_priv_state *ps = (void *)_ps;
284 schedule_work(&ps->ppu_work);
287 static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
289 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
292 mutex_lock(&ps->ppu_mutex);
294 /* If the PHY polling unit is enabled, disable it so that
295 * we can access the PHY registers. If it was already
296 * disabled, cancel the timer that is going to re-enable
299 if (!ps->ppu_disabled) {
300 ret = mv88e6xxx_ppu_disable(ds);
302 mutex_unlock(&ps->ppu_mutex);
305 ps->ppu_disabled = 1;
307 del_timer(&ps->ppu_timer);
314 static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
316 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
318 /* Schedule a timer to re-enable the PHY polling unit. */
319 mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
320 mutex_unlock(&ps->ppu_mutex);
323 void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
325 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
327 mutex_init(&ps->ppu_mutex);
328 INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
329 init_timer(&ps->ppu_timer);
330 ps->ppu_timer.data = (unsigned long)ps;
331 ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
/* Read a PHY register while the PPU is held off. */
int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ds);
	if (ret >= 0) {
		ret = mv88e6xxx_reg_read(ds, addr, regnum);
		mv88e6xxx_ppu_access_put(ds);
	}

	return ret;
}
347 int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
352 ret = mv88e6xxx_ppu_access_get(ds);
354 ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
355 mv88e6xxx_ppu_access_put(ds);
362 static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
364 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
367 case PORT_SWITCH_ID_6031:
368 case PORT_SWITCH_ID_6061:
369 case PORT_SWITCH_ID_6035:
370 case PORT_SWITCH_ID_6065:
376 static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
378 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
381 case PORT_SWITCH_ID_6092:
382 case PORT_SWITCH_ID_6095:
388 static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
390 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
393 case PORT_SWITCH_ID_6046:
394 case PORT_SWITCH_ID_6085:
395 case PORT_SWITCH_ID_6096:
396 case PORT_SWITCH_ID_6097:
402 static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
404 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
407 case PORT_SWITCH_ID_6123:
408 case PORT_SWITCH_ID_6161:
409 case PORT_SWITCH_ID_6165:
415 static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
417 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
420 case PORT_SWITCH_ID_6121:
421 case PORT_SWITCH_ID_6122:
422 case PORT_SWITCH_ID_6152:
423 case PORT_SWITCH_ID_6155:
424 case PORT_SWITCH_ID_6182:
425 case PORT_SWITCH_ID_6185:
426 case PORT_SWITCH_ID_6108:
427 case PORT_SWITCH_ID_6131:
433 static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
435 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
438 case PORT_SWITCH_ID_6320:
439 case PORT_SWITCH_ID_6321:
445 static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
447 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
450 case PORT_SWITCH_ID_6171:
451 case PORT_SWITCH_ID_6175:
452 case PORT_SWITCH_ID_6350:
453 case PORT_SWITCH_ID_6351:
459 static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
461 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
464 case PORT_SWITCH_ID_6172:
465 case PORT_SWITCH_ID_6176:
466 case PORT_SWITCH_ID_6240:
467 case PORT_SWITCH_ID_6352:
473 /* We expect the switch to perform auto negotiation if there is a real
474 * phy. However, in the case of a fixed link phy, we force the port
475 * settings from the fixed link settings.
477 void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
478 struct phy_device *phydev)
480 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
484 if (!phy_is_pseudo_fixed_link(phydev))
487 mutex_lock(&ps->smi_mutex);
489 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
493 reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
494 PORT_PCS_CTRL_FORCE_LINK |
495 PORT_PCS_CTRL_DUPLEX_FULL |
496 PORT_PCS_CTRL_FORCE_DUPLEX |
497 PORT_PCS_CTRL_UNFORCED);
499 reg |= PORT_PCS_CTRL_FORCE_LINK;
501 reg |= PORT_PCS_CTRL_LINK_UP;
503 if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
506 switch (phydev->speed) {
508 reg |= PORT_PCS_CTRL_1000;
511 reg |= PORT_PCS_CTRL_100;
514 reg |= PORT_PCS_CTRL_10;
517 pr_info("Unknown speed");
521 reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
522 if (phydev->duplex == DUPLEX_FULL)
523 reg |= PORT_PCS_CTRL_DUPLEX_FULL;
525 if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
526 (port >= ps->num_ports - 2)) {
527 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
528 reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
529 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
530 reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
531 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
532 reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
533 PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
535 _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);
538 mutex_unlock(&ps->smi_mutex);
541 /* Must be called with SMI mutex held */
542 static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
547 for (i = 0; i < 10; i++) {
548 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
549 if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
556 /* Must be called with SMI mutex held */
557 static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
561 if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
562 port = (port + 1) << 5;
564 /* Snapshot the hardware statistics counters for this port. */
565 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
566 GLOBAL_STATS_OP_CAPTURE_PORT |
567 GLOBAL_STATS_OP_HIST_RX_TX | port);
571 /* Wait for the snapshotting to complete. */
572 ret = _mv88e6xxx_stats_wait(ds);
579 /* Must be called with SMI mutex held */
580 static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
587 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
588 GLOBAL_STATS_OP_READ_CAPTURED |
589 GLOBAL_STATS_OP_HIST_RX_TX | stat);
593 ret = _mv88e6xxx_stats_wait(ds);
597 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
603 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
610 static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
611 { "in_good_octets", 8, 0x00, },
612 { "in_bad_octets", 4, 0x02, },
613 { "in_unicast", 4, 0x04, },
614 { "in_broadcasts", 4, 0x06, },
615 { "in_multicasts", 4, 0x07, },
616 { "in_pause", 4, 0x16, },
617 { "in_undersize", 4, 0x18, },
618 { "in_fragments", 4, 0x19, },
619 { "in_oversize", 4, 0x1a, },
620 { "in_jabber", 4, 0x1b, },
621 { "in_rx_error", 4, 0x1c, },
622 { "in_fcs_error", 4, 0x1d, },
623 { "out_octets", 8, 0x0e, },
624 { "out_unicast", 4, 0x10, },
625 { "out_broadcasts", 4, 0x13, },
626 { "out_multicasts", 4, 0x12, },
627 { "out_pause", 4, 0x15, },
628 { "excessive", 4, 0x11, },
629 { "collisions", 4, 0x1e, },
630 { "deferred", 4, 0x05, },
631 { "single", 4, 0x14, },
632 { "multiple", 4, 0x17, },
633 { "out_fcs_error", 4, 0x03, },
634 { "late", 4, 0x1f, },
635 { "hist_64bytes", 4, 0x08, },
636 { "hist_65_127bytes", 4, 0x09, },
637 { "hist_128_255bytes", 4, 0x0a, },
638 { "hist_256_511bytes", 4, 0x0b, },
639 { "hist_512_1023bytes", 4, 0x0c, },
640 { "hist_1024_max_bytes", 4, 0x0d, },
641 /* Not all devices have the following counters */
642 { "sw_in_discards", 4, 0x110, },
643 { "sw_in_filtered", 2, 0x112, },
644 { "sw_out_filtered", 2, 0x113, },
648 static bool have_sw_in_discards(struct dsa_switch *ds)
650 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
653 case PORT_SWITCH_ID_6095: case PORT_SWITCH_ID_6161:
654 case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171:
655 case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176:
656 case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185:
657 case PORT_SWITCH_ID_6352:
664 static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
666 struct mv88e6xxx_hw_stat *stats,
667 int port, uint8_t *data)
671 for (i = 0; i < nr_stats; i++) {
672 memcpy(data + i * ETH_GSTRING_LEN,
673 stats[i].string, ETH_GSTRING_LEN);
677 static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
679 struct mv88e6xxx_hw_stat *stats,
682 struct mv88e6xxx_hw_stat *s = stats + stat;
688 if (s->reg >= 0x100) {
689 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
695 if (s->sizeof_stat == 4) {
696 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
703 _mv88e6xxx_stats_read(ds, s->reg, &low);
704 if (s->sizeof_stat == 8)
705 _mv88e6xxx_stats_read(ds, s->reg + 1, &high);
707 value = (((u64)high) << 16) | low;
711 static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
713 struct mv88e6xxx_hw_stat *stats,
714 int port, uint64_t *data)
716 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
720 mutex_lock(&ps->smi_mutex);
722 ret = _mv88e6xxx_stats_snapshot(ds, port);
724 mutex_unlock(&ps->smi_mutex);
728 /* Read each of the counters. */
729 for (i = 0; i < nr_stats; i++)
730 data[i] = _mv88e6xxx_get_ethtool_stat(ds, i, stats, port);
732 mutex_unlock(&ps->smi_mutex);
735 /* All the statistics in the table */
737 mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
739 if (have_sw_in_discards(ds))
740 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
741 mv88e6xxx_hw_stats, port, data);
743 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
744 mv88e6xxx_hw_stats, port, data);
747 int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
749 if (have_sw_in_discards(ds))
750 return ARRAY_SIZE(mv88e6xxx_hw_stats);
751 return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
755 mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
756 int port, uint64_t *data)
758 if (have_sw_in_discards(ds))
759 _mv88e6xxx_get_ethtool_stats(
760 ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
761 mv88e6xxx_hw_stats, port, data);
763 _mv88e6xxx_get_ethtool_stats(
764 ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
765 mv88e6xxx_hw_stats, port, data);
768 int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
770 return 32 * sizeof(u16);
773 void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
774 struct ethtool_regs *regs, void *_p)
781 memset(p, 0xff, 32 * sizeof(u16));
783 for (i = 0; i < 32; i++) {
786 ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
792 /* Must be called with SMI lock held */
793 static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
796 unsigned long timeout = jiffies + HZ / 10;
798 while (time_before(jiffies, timeout)) {
801 ret = _mv88e6xxx_reg_read(ds, reg, offset);
807 usleep_range(1000, 2000);
812 static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
814 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
817 mutex_lock(&ps->smi_mutex);
818 ret = _mv88e6xxx_wait(ds, reg, offset, mask);
819 mutex_unlock(&ps->smi_mutex);
824 static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
826 return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
827 GLOBAL2_SMI_OP_BUSY);
830 int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
832 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
833 GLOBAL2_EEPROM_OP_LOAD);
836 int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
838 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
839 GLOBAL2_EEPROM_OP_BUSY);
842 /* Must be called with SMI lock held */
843 static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
845 return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
849 /* Must be called with SMI mutex held */
850 static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
855 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
856 GLOBAL2_SMI_OP_22_READ | (addr << 5) |
861 ret = _mv88e6xxx_phy_wait(ds);
865 return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
868 /* Must be called with SMI mutex held */
869 static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
874 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
878 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
879 GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
882 return _mv88e6xxx_phy_wait(ds);
885 int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
887 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
890 mutex_lock(&ps->smi_mutex);
892 reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
896 e->eee_enabled = !!(reg & 0x0200);
897 e->tx_lpi_enabled = !!(reg & 0x0100);
899 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
903 e->eee_active = !!(reg & PORT_STATUS_EEE);
907 mutex_unlock(&ps->smi_mutex);
911 int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
912 struct phy_device *phydev, struct ethtool_eee *e)
914 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
918 mutex_lock(&ps->smi_mutex);
920 ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
927 if (e->tx_lpi_enabled)
930 ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
932 mutex_unlock(&ps->smi_mutex);
937 static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 cmd)
941 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
945 return _mv88e6xxx_atu_wait(ds);
948 static int _mv88e6xxx_atu_data_write(struct dsa_switch *ds,
949 struct mv88e6xxx_atu_entry *entry)
951 u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
953 if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
954 unsigned int mask, shift;
957 data |= GLOBAL_ATU_DATA_TRUNK;
958 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
959 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
961 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
962 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
965 data |= (entry->portv_trunkid << shift) & mask;
968 return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, data);
971 static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
972 struct mv88e6xxx_atu_entry *entry,
978 err = _mv88e6xxx_atu_wait(ds);
982 err = _mv88e6xxx_atu_data_write(ds, entry);
987 err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
992 op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
993 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
995 op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
996 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
999 return _mv88e6xxx_atu_cmd(ds, op);
1002 static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too)
1004 struct mv88e6xxx_atu_entry entry = {
1006 .state = 0, /* EntryState bits must be 0 */
1009 return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
1012 static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port,
1013 int to_port, bool static_too)
1015 struct mv88e6xxx_atu_entry entry = {
1020 /* EntryState bits must be 0xF */
1021 entry.state = GLOBAL_ATU_DATA_STATE_MASK;
1023 /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
1024 entry.portv_trunkid = (to_port & 0x0f) << 4;
1025 entry.portv_trunkid |= from_port & 0x0f;
1027 return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
1030 static int _mv88e6xxx_atu_remove(struct dsa_switch *ds, u16 fid, int port,
1033 /* Destination port 0xF means remove the entries */
1034 return _mv88e6xxx_atu_move(ds, fid, port, 0x0f, static_too);
1037 static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
1039 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1043 mutex_lock(&ps->smi_mutex);
1045 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
1051 oldstate = reg & PORT_CONTROL_STATE_MASK;
1052 if (oldstate != state) {
1053 /* Flush forwarding database if we're moving a port
1054 * from Learning or Forwarding state to Disabled or
1055 * Blocking or Listening state.
1057 if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
1058 state <= PORT_CONTROL_STATE_BLOCKING) {
1059 ret = _mv88e6xxx_atu_remove(ds, 0, port, false);
1063 reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
1064 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
1069 mutex_unlock(&ps->smi_mutex);
1073 static int _mv88e6xxx_port_vlan_map_set(struct dsa_switch *ds, int port,
1076 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1077 const u16 mask = (1 << ps->num_ports) - 1;
1080 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
1085 reg |= output_ports & mask;
1087 return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
1090 int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
1092 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1096 case BR_STATE_DISABLED:
1097 stp_state = PORT_CONTROL_STATE_DISABLED;
1099 case BR_STATE_BLOCKING:
1100 case BR_STATE_LISTENING:
1101 stp_state = PORT_CONTROL_STATE_BLOCKING;
1103 case BR_STATE_LEARNING:
1104 stp_state = PORT_CONTROL_STATE_LEARNING;
1106 case BR_STATE_FORWARDING:
1108 stp_state = PORT_CONTROL_STATE_FORWARDING;
1112 netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, stp_state);
1114 /* mv88e6xxx_port_stp_update may be called with softirqs disabled,
1115 * so we can not update the port state directly but need to schedule it.
1117 ps->port_state[port] = stp_state;
1118 set_bit(port, &ps->port_state_update_mask);
1119 schedule_work(&ps->bridge_work);
1124 static int _mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
1128 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
1132 *pvid = ret & PORT_DEFAULT_VLAN_MASK;
1137 int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
1141 ret = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
1145 *pvid = ret & PORT_DEFAULT_VLAN_MASK;
1150 static int _mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
1152 return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
1153 pvid & PORT_DEFAULT_VLAN_MASK);
1156 static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
1158 return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
1159 GLOBAL_VTU_OP_BUSY);
1162 static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
1166 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
1170 return _mv88e6xxx_vtu_wait(ds);
1173 static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
1177 ret = _mv88e6xxx_vtu_wait(ds);
1181 return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
1184 static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
1185 struct mv88e6xxx_vtu_stu_entry *entry,
1186 unsigned int nibble_offset)
1188 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1193 for (i = 0; i < 3; ++i) {
1194 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1195 GLOBAL_VTU_DATA_0_3 + i);
1202 for (i = 0; i < ps->num_ports; ++i) {
1203 unsigned int shift = (i % 4) * 4 + nibble_offset;
1204 u16 reg = regs[i / 4];
1206 entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
1212 static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
1213 struct mv88e6xxx_vtu_stu_entry *entry,
1214 unsigned int nibble_offset)
1216 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1217 u16 regs[3] = { 0 };
1221 for (i = 0; i < ps->num_ports; ++i) {
1222 unsigned int shift = (i % 4) * 4 + nibble_offset;
1223 u8 data = entry->data[i];
1225 regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
1228 for (i = 0; i < 3; ++i) {
1229 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
1230 GLOBAL_VTU_DATA_0_3 + i, regs[i]);
1238 static int _mv88e6xxx_vtu_vid_write(struct dsa_switch *ds, u16 vid)
1240 return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
1241 vid & GLOBAL_VTU_VID_MASK);
1244 static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
1245 struct mv88e6xxx_vtu_stu_entry *entry)
1247 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1250 ret = _mv88e6xxx_vtu_wait(ds);
1254 ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
1258 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
1262 next.vid = ret & GLOBAL_VTU_VID_MASK;
1263 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1266 ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
1270 if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1271 mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1272 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1277 next.fid = ret & GLOBAL_VTU_FID_MASK;
1279 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1284 next.sid = ret & GLOBAL_VTU_SID_MASK;
1292 static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
1293 struct mv88e6xxx_vtu_stu_entry *entry)
1298 ret = _mv88e6xxx_vtu_wait(ds);
1305 /* Write port member tags */
1306 ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
1310 if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1311 mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1312 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1313 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1317 reg = entry->fid & GLOBAL_VTU_FID_MASK;
1318 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
1323 reg = GLOBAL_VTU_VID_VALID;
1325 reg |= entry->vid & GLOBAL_VTU_VID_MASK;
1326 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1330 return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
1333 static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
1334 struct mv88e6xxx_vtu_stu_entry *entry)
1336 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1339 ret = _mv88e6xxx_vtu_wait(ds);
1343 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
1344 sid & GLOBAL_VTU_SID_MASK);
1348 ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
1352 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
1356 next.sid = ret & GLOBAL_VTU_SID_MASK;
1358 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
1362 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1365 ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
1374 static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
1375 struct mv88e6xxx_vtu_stu_entry *entry)
1380 ret = _mv88e6xxx_vtu_wait(ds);
1387 /* Write port states */
1388 ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
1392 reg = GLOBAL_VTU_VID_VALID;
1394 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1398 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1399 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1403 return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
1406 static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
1407 struct mv88e6xxx_vtu_stu_entry *entry)
1409 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1410 struct mv88e6xxx_vtu_stu_entry vlan = {
1413 .fid = vid, /* We use one FID per VLAN */
1417 /* exclude all ports except the CPU */
1418 for (i = 0; i < ps->num_ports; ++i)
1419 vlan.data[i] = dsa_is_cpu_port(ds, i) ?
1420 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED :
1421 GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1423 if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1424 mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1425 struct mv88e6xxx_vtu_stu_entry vstp;
1428 /* Adding a VTU entry requires a valid STU entry. As VSTP is not
1429 * implemented, only one STU entry is needed to cover all VTU
1430 * entries. Thus, validate the SID 0.
1433 err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
1437 if (vstp.sid != vlan.sid || !vstp.valid) {
1438 memset(&vstp, 0, sizeof(vstp));
1440 vstp.sid = vlan.sid;
1442 err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
1447 /* Clear all MAC addresses from the new database */
1448 err = _mv88e6xxx_atu_flush(ds, vlan.fid, true);
/* switchdev prepare phase for VLAN add — nothing to reserve. */
int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_vlan *vlan,
				struct switchdev_trans *trans)
{
	/* We don't need any dynamic resource from the kernel (yet),
	 * so skip the prepare phase.
	 */
	return 0;
}
1467 static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
1470 struct mv88e6xxx_vtu_stu_entry vlan;
1473 err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
1477 err = _mv88e6xxx_vtu_getnext(ds, &vlan);
1481 if (vlan.vid != vid || !vlan.valid) {
1482 err = _mv88e6xxx_vlan_init(ds, vid, &vlan);
1487 vlan.data[port] = untagged ?
1488 GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
1489 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
1491 return _mv88e6xxx_vtu_loadpurge(ds, &vlan);
1494 int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
1495 const struct switchdev_obj_port_vlan *vlan,
1496 struct switchdev_trans *trans)
1498 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1499 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1500 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1504 mutex_lock(&ps->smi_mutex);
1506 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1507 err = _mv88e6xxx_port_vlan_add(ds, port, vid, untagged);
1512 /* no PVID with ranges, otherwise it's a bug */
1514 err = _mv88e6xxx_port_pvid_set(ds, port, vid);
1516 mutex_unlock(&ps->smi_mutex);
1521 static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
1523 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1524 struct mv88e6xxx_vtu_stu_entry vlan;
1527 err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
1531 err = _mv88e6xxx_vtu_getnext(ds, &vlan);
1535 if (vlan.vid != vid || !vlan.valid ||
1536 vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
1539 vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1541 /* keep the VLAN unless all ports are excluded */
1543 for (i = 0; i < ps->num_ports; ++i) {
1544 if (dsa_is_cpu_port(ds, i))
1547 if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
1553 err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
1557 return _mv88e6xxx_atu_remove(ds, vlan.fid, port, false);
1560 int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
1561 const struct switchdev_obj_port_vlan *vlan)
1563 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1567 mutex_lock(&ps->smi_mutex);
1569 err = _mv88e6xxx_port_pvid_get(ds, port, &pvid);
1573 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1574 err = _mv88e6xxx_port_vlan_del(ds, port, vid);
1579 err = _mv88e6xxx_port_pvid_set(ds, port, 0);
1586 mutex_unlock(&ps->smi_mutex);
/* Iterate the VTU: find the first valid VLAN with VID > *vid and report
 * its member and untagged port sets as bitmaps.  @ports and @untagged are
 * caller-provided bitmaps covering ps->num_ports bits.
 */
1591 int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
1592 unsigned long *ports, unsigned long *untagged)
1594 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1595 struct mv88e6xxx_vtu_stu_entry next;
1602 mutex_lock(&ps->smi_mutex);
/* Write the starting VID, then let the hardware return the next entry */
1603 err = _mv88e6xxx_vtu_vid_write(ds, *vid);
1607 err = _mv88e6xxx_vtu_getnext(ds, &next);
1609 mutex_unlock(&ps->smi_mutex);
/* Translate per-port VTU member tags into the two bitmaps */
1619 for (port = 0; port < ps->num_ports; ++port) {
1620 clear_bit(port, ports);
1621 clear_bit(port, untagged);
/* CPU ports are not reported to the caller */
1623 if (dsa_is_cpu_port(ds, port))
1626 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED ||
1627 next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
1628 set_bit(port, ports);
1630 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
1631 set_bit(port, untagged);
/* Write a 6-byte MAC address into the three 16-bit ATU MAC registers
 * (GLOBAL_ATU_MAC_01/23/45), big-endian within each register.
 * Caller must hold ps->smi_mutex.
 */
1637 static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
1638 const unsigned char *addr)
1642 for (i = 0; i < 3; i++) {
1643 ret = _mv88e6xxx_reg_write(
1644 ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
1645 (addr[i * 2] << 8) | addr[i * 2 + 1]);
/* Read the three 16-bit ATU MAC registers back into a 6-byte MAC address.
 * Inverse of _mv88e6xxx_atu_mac_write().  Caller must hold ps->smi_mutex.
 */
1653 static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
1657 for (i = 0; i < 3; i++) {
1658 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1659 GLOBAL_ATU_MAC_01 + i);
/* High byte first: registers hold the address big-endian */
1662 addr[i * 2] = ret >> 8;
1663 addr[i * 2 + 1] = ret & 0xff;
/* Program one ATU entry: wait for the ATU, write MAC, data and FID
 * registers, then issue the Load-to-database command.
 * Caller must hold ps->smi_mutex.
 */
1669 static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
1670 struct mv88e6xxx_atu_entry *entry)
1674 ret = _mv88e6xxx_atu_wait(ds);
1678 ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
1682 ret = _mv88e6xxx_atu_data_write(ds, entry);
1686 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, entry->fid);
1690 return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB);
/* Build and load an ATU entry binding @addr (in the FID of @vid) to @port
 * with the given entry @state.  Loading with state UNUSED deletes the
 * entry, so the port vector is only filled in for live states.
 * Caller must hold ps->smi_mutex.
 */
1693 static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
1694 const unsigned char *addr, u16 vid,
1697 struct mv88e6xxx_atu_entry entry = { 0 };
1699 entry.fid = vid; /* We use one FID per VLAN */
1700 entry.state = state;
1701 ether_addr_copy(entry.mac, addr);
1702 if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1703 entry.trunk = false;
/* Single-port membership: only this port's bit in the port vector */
1704 entry.portv_trunkid = BIT(port);
1707 return _mv88e6xxx_atu_load(ds, &entry);
/* switchdev prepare-phase hook for FDB additions.  Nothing to reserve:
 * entries are written directly to the hardware ATU in the commit phase.
 */
1710 int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
1711 const struct switchdev_obj_port_fdb *fdb,
1712 struct switchdev_trans *trans)
1714 /* We don't use per-port FDB */
1718 /* We don't need any dynamic resource from the kernel (yet),
1719 * so skip the prepare phase.
/* switchdev commit-phase hook: install a static FDB entry for @port.
 * Multicast addresses get the MC_STATIC state, unicast UC_STATIC.
 */
1724 int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
1725 const struct switchdev_obj_port_fdb *fdb,
1726 struct switchdev_trans *trans)
1728 int state = is_multicast_ether_addr(fdb->addr) ?
1729 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1730 GLOBAL_ATU_DATA_STATE_UC_STATIC;
1731 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1734 mutex_lock(&ps->smi_mutex);
1735 ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state);
1736 mutex_unlock(&ps->smi_mutex);
/* switchdev hook: delete an FDB entry by loading it with state UNUSED,
 * which the ATU treats as a purge of that MAC/FID pair.
 */
1741 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
1742 const struct switchdev_obj_port_fdb *fdb)
1744 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1747 mutex_lock(&ps->smi_mutex);
1748 ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid,
1749 GLOBAL_ATU_DATA_STATE_UNUSED);
1750 mutex_unlock(&ps->smi_mutex);
/* Fetch the next ATU entry in @fid after the MAC currently latched in the
 * ATU MAC registers, decoding state and the trunk-ID or port-vector field.
 * Caller must hold ps->smi_mutex and must have seeded the MAC registers.
 */
1755 static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
1756 struct mv88e6xxx_atu_entry *entry)
1758 struct mv88e6xxx_atu_entry next = { 0 };
1763 ret = _mv88e6xxx_atu_wait(ds);
1767 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
1771 ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
1775 ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
1779 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
1783 next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
1784 if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1785 unsigned int mask, shift;
/* The same data register carries either a trunk ID or a port vector,
 * selected by the TRUNK bit.
 */
1787 if (ret & GLOBAL_ATU_DATA_TRUNK) {
1789 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
1790 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
1793 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
1794 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
1797 next.portv_trunkid = (ret & mask) >> shift;
/* switchdev dump hook: walk every VTU entry, and within each VLAN's FID
 * walk every ATU entry, invoking @cb for each address whose port vector
 * includes @port.  Both walks use the hardware GetNext wrap-around:
 * seeding all-ones makes the first GetNext return the lowest entry, and
 * getting all-ones (broadcast MAC / max VID) back again ends the loop.
 */
1804 int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
1805 struct switchdev_obj_port_fdb *fdb,
1806 int (*cb)(struct switchdev_obj *obj))
1808 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1809 struct mv88e6xxx_vtu_stu_entry vlan = {
1810 .vid = GLOBAL_VTU_VID_MASK, /* all ones */
1814 mutex_lock(&ps->smi_mutex);
1816 err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid);
1821 struct mv88e6xxx_atu_entry addr = {
1822 .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
1825 err = _mv88e6xxx_vtu_getnext(ds, &vlan);
/* Seed the ATU MAC registers with all-ones to start the inner walk */
1832 err = _mv88e6xxx_atu_mac_write(ds, addr.mac);
1837 err = _mv88e6xxx_atu_getnext(ds, vlan.fid, &addr);
1841 if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
/* Report only non-trunk entries that include this port */
1844 if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
1845 bool is_static = addr.state ==
1846 (is_multicast_ether_addr(addr.mac) ?
1847 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1848 GLOBAL_ATU_DATA_STATE_UC_STATIC);
1850 fdb->vid = vlan.vid;
1851 ether_addr_copy(fdb->addr, addr.mac);
/* Static entries are reported as NUD_NOARP, learned ones otherwise */
1852 fdb->ndm_state = is_static ? NUD_NOARP :
1855 err = cb(&fdb->obj);
1859 } while (!is_broadcast_ether_addr(addr.mac));
1861 } while (vlan.vid < GLOBAL_VTU_VID_MASK);
1864 mutex_unlock(&ps->smi_mutex);
/* Deferred worker: apply pending STP port-state changes recorded in
 * ps->port_state_update_mask by the (atomic-context) bridge callbacks.
 */
1869 static void mv88e6xxx_bridge_work(struct work_struct *work)
1871 struct mv88e6xxx_priv_state *ps;
1872 struct dsa_switch *ds;
1875 ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
/* NOTE(review): recovers ds from ps by stepping back one struct
 * dsa_switch — this assumes the priv area is allocated immediately
 * after the dsa_switch (ds_to_priv() being ds + 1); confirm against
 * the allocation site before touching this.
 */
1876 ds = ((struct dsa_switch *)ps) - 1;
/* Drain the pending-port bitmap one port at a time */
1878 while (ps->port_state_update_mask) {
1879 port = __ffs(ps->port_state_update_mask);
1880 clear_bit(port, &ps->port_state_update_mask);
1881 mv88e6xxx_set_port_state(ds, port, ps->port_state[port]);
/* One-time per-port initialization: PCS forcing, port control registers,
 * jumbo/802.1Q policy, ATU/priority/pause limits, tag remapping, rate
 * control, the port-based VLAN map and the default VID.  Register writes
 * are gated on the chip-family predicates because register layouts
 * differ across the supported 88E6xxx families.
 * Takes ps->smi_mutex for the duration.
 */
1885 static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
1887 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1891 mutex_lock(&ps->smi_mutex);
1893 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1894 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1895 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
1896 mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
1897 /* MAC Forcing register: don't force link, speed,
1898 * duplex or flow control state to any particular
1899 * values on physical ports, but force the CPU port
1900 * and all DSA ports to their maximum bandwidth and
1903 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
1904 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
1905 reg &= ~PORT_PCS_CTRL_UNFORCED;
1906 reg |= PORT_PCS_CTRL_FORCE_LINK |
1907 PORT_PCS_CTRL_LINK_UP |
1908 PORT_PCS_CTRL_DUPLEX_FULL |
1909 PORT_PCS_CTRL_FORCE_DUPLEX;
/* 6065 tops out at 100 Mb/s; other families force gigabit */
1910 if (mv88e6xxx_6065_family(ds))
1911 reg |= PORT_PCS_CTRL_100;
1913 reg |= PORT_PCS_CTRL_1000;
1915 reg |= PORT_PCS_CTRL_UNFORCED;
1918 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1919 PORT_PCS_CTRL, reg);
1924 /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
1925 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
1926 * tunneling, determine priority by looking at 802.1p and IP
1927 * priority fields (IP prio has precedence), and set STP state
1930 * If this is the CPU link, use DSA or EDSA tagging depending
1931 * on which tagging mode was configured.
1933 * If this is a link to another switch, use DSA tagging mode.
1935 * If this is the upstream port for this switch, enable
1936 * forwarding of unknown unicasts and multicasts.
1939 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1940 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1941 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
1942 mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
1943 reg = PORT_CONTROL_IGMP_MLD_SNOOP |
1944 PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
1945 PORT_CONTROL_STATE_FORWARDING;
1946 if (dsa_is_cpu_port(ds, port)) {
1947 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
1948 reg |= PORT_CONTROL_DSA_TAG;
1949 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1950 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1951 mv88e6xxx_6320_family(ds)) {
/* Frame mode follows the tag protocol negotiated for the tree */
1952 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
1953 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
1955 reg |= PORT_CONTROL_FRAME_MODE_DSA;
1956 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
1957 PORT_CONTROL_FORWARD_UNKNOWN_MC;
1960 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1961 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1962 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
1963 mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
1964 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
1965 reg |= PORT_CONTROL_EGRESS_ADD_TAG;
1968 if (dsa_is_dsa_port(ds, port)) {
1969 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
1970 reg |= PORT_CONTROL_DSA_TAG;
1971 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1972 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1973 mv88e6xxx_6320_family(ds)) {
1974 reg |= PORT_CONTROL_FRAME_MODE_DSA;
1977 if (port == dsa_upstream_port(ds))
1978 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
1979 PORT_CONTROL_FORWARD_UNKNOWN_MC;
1982 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1988 /* Port Control 2: don't force a good FCS, set the maximum frame size to
1989 * 10240 bytes, enable secure 802.1q tags, don't discard tagged or
1990 * untagged frames on this port, do a destination address lookup on all
1991 * received packets as usual, disable ARP mirroring and don't send a
1992 * copy of all transmitted/received frames on this port to the CPU.
1995 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1996 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1997 mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
1998 reg = PORT_CONTROL_2_MAP_DA;
2000 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2001 mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
2002 reg |= PORT_CONTROL_2_JUMBO_10240;
2004 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
2005 /* Set the upstream port this port should use */
2006 reg |= dsa_upstream_port(ds);
2007 /* enable forwarding of unknown multicast addresses to
2010 if (port == dsa_upstream_port(ds))
2011 reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
2014 reg |= PORT_CONTROL_2_8021Q_SECURE;
2017 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2018 PORT_CONTROL_2, reg);
2023 /* Port Association Vector: when learning source addresses
2024 * of packets, add the address to the address database using
2025 * a port bitmap that has only the bit for this port set and
2026 * the other bits clear.
2028 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR,
2033 /* Egress rate control 2: disable egress rate control. */
2034 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
2039 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2040 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2041 mv88e6xxx_6320_family(ds)) {
2042 /* Do not limit the period of time that this port can
2043 * be paused for by the remote end or the period of
2044 * time that this port can pause the remote end.
2046 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2047 PORT_PAUSE_CTRL, 0x0000);
2051 /* Port ATU control: disable limiting the number of
2052 * address database entries that this port is allowed
2055 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2056 PORT_ATU_CONTROL, 0x0000);
2057 /* Priority Override: disable DA, SA and VTU priority
2060 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2061 PORT_PRI_OVERRIDE, 0x0000);
2065 /* Port Ethertype: use the Ethertype DSA Ethertype
2068 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2069 PORT_ETH_TYPE, ETH_P_EDSA);
2072 /* Tag Remap: use an identity 802.1p prio -> switch
2075 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2076 PORT_TAG_REGMAP_0123, 0x3210);
2080 /* Tag Remap 2: use an identity 802.1p prio -> switch
2083 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2084 PORT_TAG_REGMAP_4567, 0x7654);
2089 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2090 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2091 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2092 mv88e6xxx_6320_family(ds)) {
2093 /* Rate Control: disable ingress rate limiting. */
2094 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2095 PORT_RATE_CONTROL, 0x0001);
2100 /* Port Control 1: disable trunking, disable sending
2101 * learning messages to this port.
2103 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
2107 /* Port based VLAN map: do not give each port its own address
2108 * database, and allow every port to egress frames on all other ports.
2110 reg = BIT(ps->num_ports) - 1; /* all ports */
/* Mask out this port's own bit so it cannot loop frames to itself.
 * NOTE(review): "reg & ~port" masks the port *number*, not BIT(port) —
 * presumably intentional given the elided context, but worth confirming.
 */
2111 ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg & ~port);
2115 /* Default VLAN ID and priority: don't set a default VLAN
2116 * ID, and set the default packet priority to zero.
2118 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
2121 mutex_unlock(&ps->smi_mutex);
/* Run mv88e6xxx_setup_port() on every port of the switch. */
2125 int mv88e6xxx_setup_ports(struct dsa_switch *ds)
2127 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2131 for (i = 0; i < ps->num_ports; i++) {
2132 ret = mv88e6xxx_setup_port(ds, i);
/* Family-independent setup: initialize the SMI mutex, cache the switch
 * product ID (masked of its revision bits) and prepare the deferred
 * bridge worker.
 */
2139 int mv88e6xxx_setup_common(struct dsa_switch *ds)
2141 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2143 mutex_init(&ps->smi_mutex);
/* Low 4 bits are the revision; keep only the product identifier */
2145 ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
2147 INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
/* Program the global (chip-wide) registers: ATU aging and learn2all,
 * ToS and 802.1p priority maps, management-frame trapping, the DSA
 * routing table, trunk masks/mappings, per-family extras (PVT, priority
 * override, ingress rate reset), then flush statistics, ATU, VTU and
 * STU.  The flush sequence at the end takes ps->smi_mutex.
 */
2152 int mv88e6xxx_setup_global(struct dsa_switch *ds)
2154 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2158 /* Set the default address aging time to 5 minutes, and
2159 * enable address learn messages to be sent to all message
2162 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
2163 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
2165 /* Configure the IP ToS mapping registers. */
2166 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
2167 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
2168 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
2169 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
2170 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
2171 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
2172 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
2173 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
2175 /* Configure the IEEE 802.1p priority mapping register. */
2176 REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
2178 /* Send all frames with destination addresses matching
2179 * 01:80:c2:00:00:0x to the CPU port.
2181 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
2183 /* Ignore removed tag data on doubly tagged packets, disable
2184 * flow control messages, force flow control priority to the
2185 * highest, and send all special multicast frames to the CPU
2186 * port at the highest priority.
2188 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
2189 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
2190 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
2192 /* Program the DSA routing table. */
/* For every possible target device, point at the configured next hop
 * (from the platform routing table) or leave the default.
 */
2193 for (i = 0; i < 32; i++) {
2196 if (ds->pd->rtable &&
2197 i != ds->index && i < ds->dst->pd->nr_chips)
2198 nexthop = ds->pd->rtable[i] & 0x1f;
2200 REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
2201 GLOBAL2_DEVICE_MAPPING_UPDATE |
2202 (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
2206 /* Clear all trunk masks. */
2207 for (i = 0; i < 8; i++)
2208 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
2209 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
2210 ((1 << ps->num_ports) - 1));
2212 /* Clear all trunk mappings. */
2213 for (i = 0; i < 16; i++)
2214 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
2215 GLOBAL2_TRUNK_MAPPING_UPDATE |
2216 (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
2218 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2219 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2220 mv88e6xxx_6320_family(ds)) {
2221 /* Send all frames with destination addresses matching
2222 * 01:80:c2:00:00:2x to the CPU port.
2224 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
2226 /* Initialise cross-chip port VLAN table to reset
2229 REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
2231 /* Clear the priority override table. */
2232 for (i = 0; i < 16; i++)
2233 REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
2237 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2238 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2239 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2240 mv88e6xxx_6320_family(ds)) {
2241 /* Disable ingress rate limiting by resetting all
2242 * ingress rate limit registers to their initial
2245 for (i = 0; i < ps->num_ports; i++)
2246 REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
2250 /* Clear the statistics counters for all ports */
2251 REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
2253 /* Wait for the flush to complete. */
2254 mutex_lock(&ps->smi_mutex);
2255 ret = _mv88e6xxx_stats_wait(ds);
2259 /* Clear all ATU entries */
2260 ret = _mv88e6xxx_atu_flush(ds, 0, true);
2264 /* Clear all the VTU and STU entries */
2265 ret = _mv88e6xxx_vtu_stu_flush(ds);
2267 mutex_unlock(&ps->smi_mutex);
/* Soft-reset the switch: disable all ports, let the transmit queues
 * drain, issue a global software reset (keeping the PPU running if
 * @ppu_active so indirect phy access via global regs 0x18/0x19 keeps
 * working), and poll up to one second for reset completion.
 */
2272 int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
2274 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
/* Expected completion bits differ depending on whether the PPU stays up */
2275 u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
2276 unsigned long timeout;
2280 /* Set all ports to the disabled state. */
2281 for (i = 0; i < ps->num_ports; i++) {
2282 ret = REG_READ(REG_PORT(i), PORT_CONTROL);
/* Clearing the low two bits puts the port STP state to Disabled */
2283 REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
2286 /* Wait for transmit queues to drain. */
2287 usleep_range(2000, 4000);
2289 /* Reset the switch. Keep the PPU active if requested. The PPU
2290 * needs to be active to support indirect phy register access
2291 * through global registers 0x18 and 0x19.
2294 REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
2296 REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
2298 /* Wait up to one second for reset to complete. */
2299 timeout = jiffies + 1 * HZ;
2300 while (time_before(jiffies, timeout)) {
2301 ret = REG_READ(REG_GLOBAL, 0x00);
2302 if ((ret & is_reset) == is_reset)
2304 usleep_range(1000, 2000);
2306 if (time_after(jiffies, timeout))
/* Read @reg from phy register @page of @port: select the page via phy
 * register 0x16, read, then restore page 0.  Serialized by smi_mutex.
 */
2312 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
2314 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2317 mutex_lock(&ps->smi_mutex);
2318 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
2321 ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
/* Always restore page 0, even after a failed read */
2323 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2324 mutex_unlock(&ps->smi_mutex);
/* Write @val to @reg on phy register @page of @port, mirroring
 * mv88e6xxx_phy_page_read(): select page, write, restore page 0.
 */
2328 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
2331 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2334 mutex_lock(&ps->smi_mutex);
2335 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
2339 ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
/* Always restore page 0, even after a failed write */
2341 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2342 mutex_unlock(&ps->smi_mutex);
/* Map a switch port number to its phy address; valid only for ports
 * within [0, num_ports).  (The out-of-range return value is elided here.)
 */
2346 static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
2348 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2350 if (port >= 0 && port < ps->num_ports)
/* DSA phy_read hook: direct phy register read, serialized by smi_mutex. */
2356 mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
2358 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2359 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2365 mutex_lock(&ps->smi_mutex);
2366 ret = _mv88e6xxx_phy_read(ds, addr, regnum);
2367 mutex_unlock(&ps->smi_mutex);
/* DSA phy_write hook: direct phy register write, serialized by smi_mutex. */
2372 mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
2374 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2375 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2381 mutex_lock(&ps->smi_mutex);
2382 ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
2383 mutex_unlock(&ps->smi_mutex);
/* Indirect phy read (via global2 SMI ops), serialized by smi_mutex. */
2388 mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
2390 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2391 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2397 mutex_lock(&ps->smi_mutex);
2398 ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
2399 mutex_unlock(&ps->smi_mutex);
/* Indirect phy write (via global2 SMI ops), serialized by smi_mutex. */
2404 mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
2407 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2408 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2414 mutex_lock(&ps->smi_mutex);
2415 ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
2416 mutex_unlock(&ps->smi_mutex);
2420 #ifdef CONFIG_NET_DSA_HWMON
/* HWMON: read the die temperature on 6161-style chips via phy 0 page 6.
 * Enables the sensor, waits for it to settle, samples, then disables it
 * and restores page 0.  Result is in degrees Celsius.
 */
2422 static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
2424 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2430 mutex_lock(&ps->smi_mutex);
/* Select phy page 6 where the temperature sensor lives */
2432 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
2436 /* Enable temperature sensor */
2437 ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2441 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
2445 /* Wait for temperature to stabilize */
2446 usleep_range(10000, 12000);
2448 val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2454 /* Disable temperature sensor */
2455 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
/* Raw 5-bit reading scales as (raw - 5) * 5 degrees C */
2459 *temp = ((val & 0x1f) - 5) * 5;
/* Restore phy page 0 regardless of the outcome */
2462 _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
2463 mutex_unlock(&ps->smi_mutex);
/* HWMON: read the die temperature on 6320/6352-style chips from phy
 * page 6 register 27; raw value is offset by 25 degrees C.
 * The 6320 family exposes the sensor on phy 3, others on phy 0.
 */
2467 static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
2469 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2474 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
2478 *temp = (ret & 0xff) - 25;
/* HWMON dispatch: pick the family-specific temperature reader. */
2483 int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
2485 if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
2486 return mv88e63xx_get_temp(ds, temp);
2488 return mv88e61xx_get_temp(ds, temp);
/* HWMON: read the temperature alarm limit (degrees C) from phy page 6
 * register 26.  Only the 6320/6352 families have this register.
 */
2491 int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
2493 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2496 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2501 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
/* 5-bit field in bits 12:8, in 5-degree steps offset by -25 C */
2505 *temp = (((ret >> 8) & 0x1f) * 5) - 25;
/* HWMON: set the temperature alarm limit.  Inverse scaling of
 * mv88e6xxx_get_temp_limit(): clamp to the 5-bit field and
 * read-modify-write bits 12:8 of phy page 6 register 26.
 */
2510 int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
2512 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2515 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2518 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2521 temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
2522 return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
2523 (ret & 0xe0ff) | (temp << 8));
/* HWMON: report whether the over-temperature alarm bit (bit 6 of phy
 * page 6 register 26) is set.  6320/6352 families only.
 */
2526 int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
2528 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2531 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2536 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2540 *alarm = !!(ret & 0x40);
2544 #endif /* CONFIG_NET_DSA_HWMON */
/* Module init: register each chip-specific switch driver that was
 * enabled at build time.
 */
2546 static int __init mv88e6xxx_init(void)
2548 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2549 register_switch_driver(&mv88e6131_switch_driver);
2551 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2552 register_switch_driver(&mv88e6123_61_65_switch_driver);
2554 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2555 register_switch_driver(&mv88e6352_switch_driver);
2557 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2558 register_switch_driver(&mv88e6171_switch_driver);
2562 module_init(mv88e6xxx_init);
/* Module exit: unregister the switch drivers in reverse order of
 * registration.
 */
2564 static void __exit mv88e6xxx_cleanup(void)
2566 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2567 unregister_switch_driver(&mv88e6171_switch_driver);
2569 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2570 unregister_switch_driver(&mv88e6352_switch_driver);
2572 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2573 unregister_switch_driver(&mv88e6123_61_65_switch_driver);
2575 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2576 unregister_switch_driver(&mv88e6131_switch_driver);
2579 module_exit(mv88e6xxx_cleanup);
2581 MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
2582 MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
2583 MODULE_LICENSE("GPL");