// SPDX-License-Identifier: GPL-2.0-or-later
/*
* net/dsa/dsa2.c - Hardware switch handling, binding version 2
* Copyright (c) 2008-2009 Marvell Semiconductor
* Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
* Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include "dsa_priv.h"
static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);
/* Track the bridges with forwarding offload enabled */
static unsigned long dsa_fwd_offloading_bridges;
/**
* dsa_tree_notify - Execute code for all switches in a DSA switch tree.
* @dst: collection of struct dsa_switch devices to notify.
* @e: event, must be of type DSA_NOTIFIER_*
* @v: event-specific value.
*
* Given a struct dsa_switch_tree, this can be used to run a function once for
* each member DSA switch. The other alternative of traversing the tree is only
* through its ports list, which does not uniquely list the switches.
*/
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
struct raw_notifier_head *nh = &dst->nh;
int err;
err = raw_notifier_call_chain(nh, e, v);
return notifier_to_errno(err);
}
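/* Example (illustrative sketch, not part of the original file): this is
 * roughly how the port layer emits a tree-wide event. The exact layout of
 * struct dsa_notifier_bridge_info is an assumption here.
 */
static int __maybe_unused dsa_example_tree_notify_bridge_join(struct dsa_port *dp,
							      struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};

	/* Runs the notifier chain once for every switch in dp's tree */
	return dsa_tree_notify(dp->ds->dst, DSA_NOTIFIER_BRIDGE_JOIN, &info);
}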
/**
* dsa_broadcast - Notify all DSA trees in the system.
* @e: event, must be of type DSA_NOTIFIER_*
* @v: event-specific value.
*
* Can be used to notify the switching fabric of events such as cross-chip
* bridging between disjoint trees (such as islands of tagger-compatible
* switches bridged by an incompatible middle switch).
*
* WARNING: this function is not reliable during probe time, because probing
* between trees is asynchronous and not all DSA trees might have probed.
*/
int dsa_broadcast(unsigned long e, void *v)
{
struct dsa_switch_tree *dst;
int err = 0;
list_for_each_entry(dst, &dsa_tree_list, list) {
err = dsa_tree_notify(dst, e, v);
if (err)
break;
}
return err;
}
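/* Example (illustrative sketch, not part of the original file): for events
 * that must be visible across disjoint trees, the same info struct as above
 * (layout assumed) is handed to dsa_broadcast() instead of dsa_tree_notify().
 */
static int __maybe_unused dsa_example_broadcast_bridge_join(struct dsa_port *dp,
							    struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};

	/* Every tree in dsa_tree_list sees the event, not just dp's own */
	return dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
}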
/**
* dsa_lag_map() - Map LAG netdev to a linear LAG ID
* @dst: Tree in which to record the mapping.
* @lag: Netdev that is to be mapped to an ID.
*
* dsa_lag_id/dsa_lag_dev can then be used to translate between the
* two spaces. The size of the mapping space is determined by the
* driver by setting ds->num_lag_ids. It is perfectly legal to leave
* it unset if it is not needed, in which case these functions become
* no-ops.
*/
void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
{
unsigned int id;
if (dsa_lag_id(dst, lag) >= 0)
/* Already mapped */
return;
for (id = 0; id < dst->lags_len; id++) {
if (!dsa_lag_dev(dst, id)) {
dst->lags[id] = lag;
return;
}
}
/* No IDs left, which is OK. Some drivers do not need it. The
* ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
* returns an error for this device when joining the LAG. The
* driver can then return -EOPNOTSUPP back to DSA, which will
* fall back to a software LAG.
*/
}
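/* Example (illustrative sketch, not part of the original file): a driver
 * opts into hardware LAG offload by sizing the ID space from its setup
 * path; dsa_lag_map() then hands out IDs in [0, num_lag_ids). The value 16
 * is an arbitrary, hardware-dependent assumption.
 */
static int __maybe_unused dsa_example_driver_setup(struct dsa_switch *ds)
{
	ds->num_lag_ids = 16;	/* assumed hardware limit */
	return 0;
}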
/**
* dsa_lag_unmap() - Remove a LAG ID mapping
* @dst: Tree in which the mapping is recorded.
* @lag: Netdev that was mapped.
*
* As there may be multiple users of the mapping, it is only removed
* if there are no other references to it.
*/
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
{
struct dsa_port *dp;
unsigned int id;
dsa_lag_foreach_port(dp, dst, lag)
/* There are remaining users of this mapping */
return;
dsa_lags_foreach_id(id, dst) {
if (dsa_lag_dev(dst, id) == lag) {
dst->lags[id] = NULL;
break;
}
}
}
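/* Example (illustrative sketch, not part of the original file): once a LAG
 * netdev is mapped, dsa_lag_id() and dsa_lag_dev() translate between the
 * netdev space and the linear ID space in either direction.
 */
static void __maybe_unused dsa_example_lag_translate(struct dsa_switch_tree *dst,
						     struct net_device *lag)
{
	int id = dsa_lag_id(dst, lag);	/* netdev -> ID, negative if unmapped */

	if (id >= 0)
		WARN_ON(dsa_lag_dev(dst, id) != lag);	/* ID -> netdev */
}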
static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
struct dsa_switch_tree *dst;
struct dsa_port *dp;
/* When preparing the offload for a port, it will have a valid
* dp->bridge_dev pointer but a not yet valid dp->bridge_num.
* However there might be other ports having the same dp->bridge_dev
* and a valid dp->bridge_num, so just ignore this port.
*/
list_for_each_entry(dst, &dsa_tree_list, list)
list_for_each_entry(dp, &dst->ports, list)
if (dp->bridge_dev == bridge_dev &&
dp->bridge_num != -1)
return dp->bridge_num;
return -1;
}
int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
int bridge_num = dsa_bridge_num_find(bridge_dev);
if (bridge_num < 0) {
/* First port that offloads TX forwarding for this bridge */
bridge_num = find_first_zero_bit(&dsa_fwd_offloading_bridges,
DSA_MAX_NUM_OFFLOADING_BRIDGES);
if (bridge_num >= max)
return -1;
set_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
return bridge_num;
}
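/* Example (illustrative sketch, not part of the original file): get/put are
 * paired per port joining/leaving a bridge with TX forwarding offload; the
 * limit of 8 offloadable bridges is an assumption standing in for a driver
 * capability.
 */
static void __maybe_unused dsa_example_bridge_num_lifecycle(struct net_device *br)
{
	int bridge_num = dsa_bridge_num_get(br, 8);

	if (bridge_num >= 0)
		dsa_bridge_num_put(br, bridge_num);	/* freed when last user leaves */
}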
void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num)
{
/* Check if the bridge is still in use, otherwise it is time
* to clean it up so we can reuse this bridge_num later.
*/
if (dsa_bridge_num_find(bridge_dev) < 0)
clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
struct dsa_switch_tree *dst;
struct dsa_port *dp;
list_for_each_entry(dst, &dsa_tree_list, list) {
if (dst->index != tree_index)
continue;
list_for_each_entry(dp, &dst->ports, list) {
if (dp->ds->index != sw_index)
continue;
return dp->ds;
}
}
return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);
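/* Example (illustrative sketch, not part of the original file): cross-chip
 * notifiers carry a (tree_index, sw_index) pair; the exported
 * dsa_switch_find() lets a driver resolve that pair back to a switch and
 * check whether the event targets it.
 */
static bool __maybe_unused dsa_example_event_targets(struct dsa_switch *ds,
						     int tree_index, int sw_index)
{
	return dsa_switch_find(tree_index, sw_index) == ds;
}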
static struct dsa_switch_tree *dsa_tree_find(int index)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list)
		if (dst->index == index)
			return dst;

	return NULL;
}
static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
struct dsa_switch_tree *dst;
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
if (!dst)
return NULL;
	dst->index = index;

	INIT_LIST_HEAD(&dst->rtable);
	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);
kref_init(&dst->refcount);
return dst;
}
static void dsa_tree_free(struct dsa_switch_tree *dst)
{
if (dst->tag_ops)
dsa_tag_driver_put(dst->tag_ops);
list_del(&dst->list);
kfree(dst);
}
static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_get(&dst->refcount);

	return dst;
}

static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst;

	dst = dsa_tree_find(index);
	if (dst)
		return dsa_tree_get(dst);
	else
		return dsa_tree_alloc(index);
}
static void dsa_tree_release(struct kref *ref)
{
struct dsa_switch_tree *dst;
dst = container_of(ref, struct dsa_switch_tree, refcount);
dsa_tree_free(dst);
}
static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}

static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
						   struct device_node *dn)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->dn == dn)
			return dp;

	return NULL;
}
static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
struct dsa_port *link_dp)
{
struct dsa_switch *ds = dp->ds;
struct dsa_switch_tree *dst;
struct dsa_link *dl;
dst = ds->dst;
list_for_each_entry(dl, &dst->rtable, list)
if (dl->dp == dp && dl->link_dp == link_dp)
return dl;
dl = kzalloc(sizeof(*dl), GFP_KERNEL);
if (!dl)
return NULL;
dl->dp = dp;
dl->link_dp = link_dp;
INIT_LIST_HEAD(&dl->list);
list_add_tail(&dl->list, &dst->rtable);
return dl;
}
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}

static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
	bool complete = true;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp)) {
			complete = dsa_port_setup_routing_table(dp);
			if (!complete)
				break;
		}
	}

	return complete;
}
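/* Example (illustrative sketch, not part of the original file): the "link"
 * phandles walked above typically come from a device tree along these lines,
 * where a DSA port of one switch points at its peer on another (node names
 * and numbers are assumptions):
 *
 *	switch0port5: port@5 {
 *		reg = <5>;
 *		label = "dsa";
 *		link = <&switch1port6>;
 *	};
 */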
static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_is_cpu(dp))
return dp;
return NULL;
}
/* Assign the default CPU port (the first one in the tree) to all ports of the
* fabric which don't already have one as part of their own switch.
*/
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
struct dsa_port *cpu_dp, *dp;
cpu_dp = dsa_tree_find_first_cpu(dst);
if (!cpu_dp) {
pr_err("DSA: tree %d has no CPU port\n", dst->index);
return -EINVAL;
}
	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->cpu_dp)
			continue;

		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = cpu_dp;
	}

	return 0;
}
/* Perform initial assignment of CPU ports to user ports and DSA links in the
* fabric, giving preference to CPU ports local to each switch. Default to
* using the first CPU port in the switch tree if the port does not have a CPU
* port local to this switch.
*/
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
struct dsa_port *cpu_dp, *dp;
list_for_each_entry(cpu_dp, &dst->ports, list) {
if (!dsa_port_is_cpu(cpu_dp))
continue;
/* Prefer a local CPU port */
dsa_switch_for_each_port(dp, cpu_dp->ds) {
/* Prefer the first local CPU port found */
if (dp->cpu_dp)
continue;
if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
dp->cpu_dp = cpu_dp;
}
}
return dsa_tree_setup_default_cpu(dst);
}
static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = NULL;
}
static int dsa_port_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	mutex_init(&dp->addr_lists_lock);
	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);

	if (ds->ops->port_setup) {
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_slave_create(dp);
		if (err)
			break;

		devlink_port_type_eth_set(dlp, dp->slave);
		break;
	}

	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_port_link_unregister_of(dp);
	if (err) {
		if (ds->ops->port_teardown)
			ds->ops->port_teardown(ds, dp->index);
		return err;
	}

	dp->setup = true;

	return 0;
}
static int dsa_port_devlink_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
struct dsa_switch_tree *dst = dp->ds->dst;
struct devlink_port_attrs attrs = {};
struct devlink *dl = dp->ds->devlink;
const unsigned char *id;
unsigned char len;
int err;
id = (const unsigned char *)&dst->index;
len = sizeof(dst->index);
attrs.phys.port_number = dp->index;
memcpy(attrs.switch_id.id, id, len);
attrs.switch_id.id_len = len;
memset(dlp, 0, sizeof(*dlp));
switch (dp->type) {
case DSA_PORT_TYPE_UNUSED:
attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
break;
case DSA_PORT_TYPE_CPU:
attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
break;
case DSA_PORT_TYPE_DSA:
attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
break;
case DSA_PORT_TYPE_USER:
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
break;
}
devlink_port_attrs_set(dlp, &attrs);
err = devlink_port_register(dl, dlp, dp->index);
if (!err)
dp->devlink_port_setup = true;
return err;
}
static void dsa_port_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a, *tmp;

	if (!dp->setup)
		return;

	if (ds->ops->port_teardown)
		ds->ops->port_teardown(ds, dp->index);

	devlink_port_type_clear(dlp);

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		if (dp->slave) {
			dsa_slave_destroy(dp->slave);
			dp->slave = NULL;
		}
		break;
	}

	list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
		list_del(&a->list);
		kfree(a);
	}

	list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
		list_del(&a->list);
		kfree(a);
	}

	dp->setup = false;
}
static void dsa_port_devlink_teardown(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
if (dp->devlink_port_setup)
devlink_port_unregister(dlp);
dp->devlink_port_setup = false;
}
/* Destroy the current devlink port, and create a new one which has the UNUSED
* flavour. At this point, any call to ds->ops->port_setup has been already
* balanced out by a call to ds->ops->port_teardown, so we know that any
* devlink port regions the driver had are now unregistered. We then call its
* ds->ops->port_setup again, in order for the driver to re-create them on the
* new devlink port.
*/
static int dsa_port_reinit_as_unused(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
int err;
dsa_port_devlink_teardown(dp);
dp->type = DSA_PORT_TYPE_UNUSED;
err = dsa_port_devlink_setup(dp);
if (err)
return err;
if (ds->ops->port_setup) {
/* On error, leave the devlink port registered,
* dsa_switch_teardown will clean it up later.
*/
err = ds->ops->port_setup(ds, dp->index);
if (err)
return err;
}
return 0;
}
static int dsa_devlink_info_get(struct devlink *dl,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (ds->ops->devlink_info_get)
return ds->ops->devlink_info_get(ds, req, extack);
return -EOPNOTSUPP;
}
static int dsa_devlink_sb_pool_get(struct devlink *dl,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_sb_pool_get)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
pool_info);
}
static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_sb_pool_set)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
threshold_type, extack);
}
static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_port_pool_get)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
pool_index, p_threshold);
}
static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
unsigned int sb_index, u16 pool_index,
u32 threshold,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_port_pool_set)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
pool_index, threshold, extack);
}
static int
dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_tc_pool_bind_get)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
tc_index, pool_type,
p_pool_index, p_threshold);
}
static int
dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_tc_pool_bind_set)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
tc_index, pool_type,
pool_index, threshold,
extack);
}
static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
unsigned int sb_index)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_sb_occ_snapshot)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
}
static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
unsigned int sb_index)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_sb_occ_max_clear)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
}
static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
unsigned int sb_index,
u16 pool_index, u32 *p_cur,
u32 *p_max)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_occ_port_pool_get)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
pool_index, p_cur, p_max);
}
static int
dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
sb_index, tc_index,
pool_type, p_cur,
p_max);
}
static const struct devlink_ops dsa_devlink_ops = {
.info_get = dsa_devlink_info_get,
.sb_pool_get = dsa_devlink_sb_pool_get,
.sb_pool_set = dsa_devlink_sb_pool_set,
.sb_port_pool_get = dsa_devlink_sb_port_pool_get,
.sb_port_pool_set = dsa_devlink_sb_port_pool_set,
.sb_tc_pool_bind_get = dsa_devlink_sb_tc_pool_bind_get,
.sb_tc_pool_bind_set = dsa_devlink_sb_tc_pool_bind_set,
.sb_occ_snapshot = dsa_devlink_sb_occ_snapshot,
.sb_occ_max_clear = dsa_devlink_sb_occ_max_clear,
.sb_occ_port_pool_get = dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = dsa_devlink_sb_occ_tc_port_bind_get,
};
static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
struct dsa_switch_tree *dst = ds->dst;
struct dsa_port *cpu_dp;
int err;
if (tag_ops->proto == dst->default_proto)
return 0;
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
rtnl_lock();
err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
tag_ops->proto);
rtnl_unlock();
if (err) {
dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
tag_ops->name, ERR_PTR(err));
return err;
}
}
return 0;
}
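/* Example (illustrative sketch, not part of the original file): a driver
 * that supports changing the tagger at runtime provides
 * ->change_tag_protocol(); the protocols accepted below are arbitrary
 * assumptions for illustration.
 */
static int __maybe_unused dsa_example_change_tag_protocol(struct dsa_switch *ds,
							  int port,
							  enum dsa_tag_protocol proto)
{
	switch (proto) {
	case DSA_TAG_PROTO_DSA:
	case DSA_TAG_PROTO_EDSA:
		/* Reprogram the CPU port's frame header mode here */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}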
static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct dsa_port *dp;
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink =
		devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	/* Setup devlink port instances now, so that the switch
	 * setup() can register regions etc, against the ports
	 */
	dsa_switch_for_each_port(dp, ds) {
		err = dsa_port_devlink_setup(dp);
		if (err)
			goto unregister_devlink_ports;
	}

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto unregister_devlink_ports;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		err = mdiobus_register(ds->slave_mii_bus);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	ds->setup = true;
	devlink_register(ds->devlink);
	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
	dsa_switch_for_each_port(dp, ds)
		dsa_port_devlink_teardown(dp);
	devlink_free(ds->devlink);
	ds->devlink = NULL;

	return err;
}
static void dsa_switch_teardown(struct dsa_switch *ds)
{
	struct dsa_port *dp;
if (!ds->setup)
return;
if (ds->devlink)
devlink_unregister(ds->devlink);
if (ds->slave_mii_bus && ds->ops->phy_read) {
mdiobus_unregister(ds->slave_mii_bus);
mdiobus_free(ds->slave_mii_bus);
ds->slave_mii_bus = NULL;
}
if (ds->ops->teardown)
ds->ops->teardown(ds);
dsa_switch_unregister_notifier(ds);
dsa_switch_for_each_port(dp, ds)
dsa_port_devlink_teardown(dp);
devlink_free(ds->devlink);
	ds->devlink = NULL;

	ds->setup = false;
}
/* First tear down the non-shared, then the shared ports. This ensures that
* all work items scheduled by our switchdev handlers for user ports have
* completed before we destroy the refcounting kept on the shared ports.
*/
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
dsa_port_teardown(dp);
dsa_flush_workqueue();
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
dsa_port_teardown(dp);
}
static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
dsa_switch_teardown(dp->ds);
}
static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err)
			goto teardown;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_port_setup(dp);
		if (err) {
			err = dsa_port_reinit_as_unused(dp);
			if (err)
				goto teardown;
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);
	dsa_tree_teardown_switches(dst);

	return err;
}
static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
int err;
list_for_each_entry(dp, &dst->ports, list) {