From ceed73a2cf4aff2921802aa3d21d45280677547d Mon Sep 17 00:00:00 2001
From: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
Date: Tue, 29 Aug 2017 22:44:18 -0600
Subject: drivers: net: ethernet: qualcomm: rmnet: Initial implementation

The RmNet driver provides transport-agnostic MAP (multiplexing and
aggregation protocol) support as an embedded module. The module provides
virtual network devices which can be attached to any IP-mode physical
device. This will be used to provide all MAP functionality on future
hardware in a single, consistent location.

Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 419 +++++++++++++++++++++
 1 file changed, 419 insertions(+)
 create mode 100644 drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c

diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
new file mode 100644
index 000000000000..e836d267d5cc
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -0,0 +1,419 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET configuration engine
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netdevice.h>
+#include "rmnet_config.h"
+#include "rmnet_handlers.h"
+#include "rmnet_vnd.h"
+#include "rmnet_private.h"
+
+/* Locking scheme -
+ * The shared resource which needs to be protected is realdev->rx_handler_data.
+ * For the writer path, this is using rtnl_lock(). The writer paths are
+ * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
+ * paths are already called with rtnl_lock() acquired. There is also an
+ * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For
+ * dereference here, we will need to use rtnl_dereference(). Dev list writing
+ * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
+ * For the reader path, the real_dev->rx_handler_data is dereferenced in the
+ * TX / RX path. We only need rcu_read_lock() for these scenarios. In these
+ * cases, the rcu_read_lock() is held in __dev_queue_xmit() and
+ * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
+ * to get the relevant information. For dev list reading, we again acquire
+ * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
+ * We also use unregister_netdevice_many() to free all rmnet devices in
+ * rmnet_force_unassociate_device() so we don't lose the rtnl_lock() and free
+ * in the same context.
+ */
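
As an aside for readers of this patch, the locking scheme above reduces to a
minimal sketch (illustration only, not part of the patch; the example_*
helpers are hypothetical):

/* Writer side: runs with rtnl_lock() held, as in newlink/dellink. */
static void example_writer(struct net_device *real_dev)
{
        struct rmnet_real_dev_info *r;

        ASSERT_RTNL();          /* we rely on the caller holding rtnl */
        r = rtnl_dereference(real_dev->rx_handler_data);
        /* ... mutate r and the upper/lower dev lists ... */
}

/* Reader side: runs in the data path; rcu_read_lock() is already held
 * by __dev_queue_xmit() / netif_receive_skb_internal().
 */
static void example_reader(struct net_device *real_dev)
{
        struct rmnet_real_dev_info *r;

        /* rcu_dereference_rtnl() is satisfied by either rcu_read_lock()
         * or the rtnl lock, so one helper can serve both contexts
         */
        r = rcu_dereference_rtnl(real_dev->rx_handler_data);
        /* ... read-only access to r ... */
}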
+
+/* Local Definitions and Declarations */
+#define RMNET_LOCAL_LOGICAL_ENDPOINT -1
+
+struct rmnet_walk_data {
+        struct net_device *real_dev;
+        struct list_head *head;
+        struct rmnet_real_dev_info *real_dev_info;
+};
+
+static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
+{
+        rx_handler_func_t *rx_handler;
+
+        rx_handler = rcu_dereference(real_dev->rx_handler);
+
+        return (rx_handler == rmnet_rx_handler);
+}
+
+/* Needs either rcu_read_lock() or rtnl lock */
+static struct rmnet_real_dev_info*
+__rmnet_get_real_dev_info(const struct net_device *real_dev)
+{
+        if (rmnet_is_real_dev_registered(real_dev))
+                return rcu_dereference_rtnl(real_dev->rx_handler_data);
+        else
+                return NULL;
+}
+
+/* Needs rtnl lock */
+static struct rmnet_real_dev_info*
+rmnet_get_real_dev_info_rtnl(const struct net_device *real_dev)
+{
+        return rtnl_dereference(real_dev->rx_handler_data);
+}
+
+static struct rmnet_endpoint*
+rmnet_get_endpoint(struct net_device *dev, int config_id)
+{
+        struct rmnet_real_dev_info *r;
+        struct rmnet_endpoint *ep;
+
+        if (!rmnet_is_real_dev_registered(dev)) {
+                ep = rmnet_vnd_get_endpoint(dev);
+        } else {
+                r = __rmnet_get_real_dev_info(dev);
+
+                if (!r)
+                        return NULL;
+
+                if (config_id == RMNET_LOCAL_LOGICAL_ENDPOINT)
+                        ep = &r->local_ep;
+                else
+                        ep = &r->muxed_ep[config_id];
+        }
+
+        return ep;
+}
+
+static int rmnet_unregister_real_device(struct net_device *real_dev,
+                                        struct rmnet_real_dev_info *r)
+{
+        if (r->nr_rmnet_devs)
+                return -EINVAL;
+
+        kfree(r);
+
+        netdev_rx_handler_unregister(real_dev);
+
+        /* release reference on real_dev */
+        dev_put(real_dev);
+
+        netdev_dbg(real_dev, "Removed from rmnet\n");
+        return 0;
+}
+
+static int rmnet_register_real_device(struct net_device *real_dev)
+{
+        struct rmnet_real_dev_info *r;
+        int rc;
+
+        ASSERT_RTNL();
+
+        if (rmnet_is_real_dev_registered(real_dev))
+                return 0;
+
+        r = kzalloc(sizeof(*r), GFP_ATOMIC);
+        if (!r)
+                return -ENOMEM;
+
+        r->dev = real_dev;
+        rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, r);
+        if (rc) {
+                kfree(r);
+                return -EBUSY;
+        }
+
+        /* hold on to real dev for MAP data */
+        dev_hold(real_dev);
+
+        netdev_dbg(real_dev, "registered with rmnet\n");
+        return 0;
+}
+
+static int rmnet_set_ingress_data_format(struct net_device *dev, u32 idf)
+{
+        struct rmnet_real_dev_info *r;
+
+        netdev_dbg(dev, "Ingress format 0x%08X\n", idf);
+
+        r = __rmnet_get_real_dev_info(dev);
+
+        r->ingress_data_format = idf;
+
+        return 0;
+}
+
+static int rmnet_set_egress_data_format(struct net_device *dev, u32 edf,
+                                        u16 agg_size, u16 agg_count)
+{
+        struct rmnet_real_dev_info *r;
+
+        netdev_dbg(dev, "Egress format 0x%08X agg size %d cnt %d\n",
+                   edf, agg_size, agg_count);
+
+        r = __rmnet_get_real_dev_info(dev);
+
+        r->egress_data_format = edf;
+
+        return 0;
+}
+
+static int __rmnet_set_endpoint_config(struct net_device *dev, int config_id,
+                                       struct rmnet_endpoint *ep)
+{
+        struct rmnet_endpoint *dev_ep;
+
+        dev_ep = rmnet_get_endpoint(dev, config_id);
+
+        if (!dev_ep)
+                return -EINVAL;
+
+        memcpy(dev_ep, ep, sizeof(struct rmnet_endpoint));
+        if (config_id == RMNET_LOCAL_LOGICAL_ENDPOINT)
+                dev_ep->mux_id = 0;
+        else
+                dev_ep->mux_id = config_id;
+
+        return 0;
+}
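
The endpoint addressing used above can be summarized with a small sketch
(illustration only, not part of the patch; example_endpoint_lookup() is
hypothetical):

static void example_endpoint_lookup(struct net_device *real_dev)
{
        struct rmnet_endpoint *ep;

        /* a muxed endpoint: resolves to &r->muxed_ep[3], and
         * __rmnet_set_endpoint_config() will set its mux_id to 3
         */
        ep = rmnet_get_endpoint(real_dev, 3);

        /* the device-local endpoint: resolves to &r->local_ep, whose
         * mux_id is forced to 0
         */
        ep = rmnet_get_endpoint(real_dev, RMNET_LOCAL_LOGICAL_ENDPOINT);
}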
+
+static int rmnet_set_endpoint_config(struct net_device *dev,
+                                     int config_id, u8 rmnet_mode,
+                                     struct net_device *egress_dev)
+{
+        struct rmnet_endpoint ep;
+
+        netdev_dbg(dev, "id %d mode %d dev %s\n",
+                   config_id, rmnet_mode, egress_dev->name);
+
+        if (config_id < RMNET_LOCAL_LOGICAL_ENDPOINT ||
+            config_id >= RMNET_MAX_LOGICAL_EP)
+                return -EINVAL;
+
+        /* This config is cleared on every set, so it's ok to not
+         * clear it on a device delete.
+         */
+        memset(&ep, 0, sizeof(struct rmnet_endpoint));
+        ep.rmnet_mode = rmnet_mode;
+        ep.egress_dev = egress_dev;
+
+        return __rmnet_set_endpoint_config(dev, config_id, &ep);
+}
+
+static int rmnet_newlink(struct net *src_net, struct net_device *dev,
+                         struct nlattr *tb[], struct nlattr *data[],
+                         struct netlink_ext_ack *extack)
+{
+        int ingress_format = RMNET_INGRESS_FORMAT_DEMUXING |
+                             RMNET_INGRESS_FORMAT_DEAGGREGATION |
+                             RMNET_INGRESS_FORMAT_MAP;
+        int egress_format = RMNET_EGRESS_FORMAT_MUXING |
+                            RMNET_EGRESS_FORMAT_MAP;
+        struct rmnet_real_dev_info *r;
+        struct net_device *real_dev;
+        int mode = RMNET_EPMODE_VND;
+        int err = 0;
+        u16 mux_id;
+
+        real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+        if (!real_dev || !dev)
+                return -ENODEV;
+
+        if (!data[IFLA_VLAN_ID])
+                return -EINVAL;
+
+        mux_id = nla_get_u16(data[IFLA_VLAN_ID]);
+
+        err = rmnet_register_real_device(real_dev);
+        if (err)
+                goto err0;
+
+        r = rmnet_get_real_dev_info_rtnl(real_dev);
+        err = rmnet_vnd_newlink(mux_id, dev, r);
+        if (err)
+                goto err1;
+
+        err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL);
+        if (err)
+                goto err2;
+
+        rmnet_vnd_set_mux(dev, mux_id);
+        rmnet_set_egress_data_format(real_dev, egress_format, 0, 0);
+        rmnet_set_ingress_data_format(real_dev, ingress_format);
+        rmnet_set_endpoint_config(real_dev, mux_id, mode, dev);
+        rmnet_set_endpoint_config(dev, mux_id, mode, real_dev);
+        return 0;
+
+err2:
+        rmnet_vnd_dellink(mux_id, r);
+err1:
+        rmnet_unregister_real_device(real_dev, r);
+err0:
+        return err;
+}
+
+static void rmnet_dellink(struct net_device *dev, struct list_head *head)
+{
+        struct rmnet_real_dev_info *r;
+        struct net_device *real_dev;
+        u8 mux_id;
+
+        rcu_read_lock();
+        real_dev = netdev_master_upper_dev_get_rcu(dev);
+        rcu_read_unlock();
+
+        if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
+                return;
+
+        r = rmnet_get_real_dev_info_rtnl(real_dev);
+
+        mux_id = rmnet_vnd_get_mux(dev);
+        rmnet_vnd_dellink(mux_id, r);
+        netdev_upper_dev_unlink(dev, real_dev);
+        rmnet_unregister_real_device(real_dev, r);
+
+        unregister_netdevice_queue(dev, head);
+}
+
+static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data)
+{
+        struct rmnet_walk_data *d = data;
+        u8 mux_id;
+
+        mux_id = rmnet_vnd_get_mux(rmnet_dev);
+
+        rmnet_vnd_dellink(mux_id, d->real_dev_info);
+        netdev_upper_dev_unlink(rmnet_dev, d->real_dev);
+        unregister_netdevice_queue(rmnet_dev, d->head);
+
+        return 0;
+}
+
+static void rmnet_force_unassociate_device(struct net_device *dev)
+{
+        struct net_device *real_dev = dev;
+        struct rmnet_real_dev_info *r;
+        struct rmnet_walk_data d;
+        LIST_HEAD(list);
+
+        if (!rmnet_is_real_dev_registered(real_dev))
+                return;
+
+        ASSERT_RTNL();
+
+        d.real_dev = real_dev;
+        d.head = &list;
+
+        r = rmnet_get_real_dev_info_rtnl(dev);
+        d.real_dev_info = r;
+
+        rcu_read_lock();
+        netdev_walk_all_lower_dev_rcu(real_dev, rmnet_dev_walk_unreg, &d);
+        rcu_read_unlock();
+        unregister_netdevice_many(&list);
+
+        rmnet_unregister_real_device(real_dev, r);
+}
+
+static int rmnet_config_notify_cb(struct notifier_block *nb,
+                                  unsigned long event, void *data)
+{
+        struct net_device *dev = netdev_notifier_info_to_dev(data);
+
+        if (!dev)
+                return NOTIFY_DONE;
+
+        switch (event) {
+        case NETDEV_UNREGISTER:
+                netdev_dbg(dev, "Kernel unregister\n");
+                rmnet_force_unassociate_device(dev);
+                break;
+
+        default:
+                break;
+        }
+
+        return NOTIFY_DONE;
+}
+
+static struct notifier_block rmnet_dev_notifier __read_mostly = {
+        .notifier_call = rmnet_config_notify_cb,
+};
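
The unregister path above is worth a sketch (illustration only, not part of
the patch; example_batched_teardown() is hypothetical): when the real device
disappears, all attached rmnet devices are queued on one list and freed in a
single batch so rtnl_lock() is never dropped mid-teardown:

static void example_batched_teardown(struct net_device *real_dev)
{
        LIST_HEAD(list);

        ASSERT_RTNL();

        /* each lower rmnet device is queued rather than freed directly,
         * mirroring rmnet_dev_walk_unreg():
         *         unregister_netdevice_queue(rmnet_dev, &list);
         */

        /* one pass tears down the whole batch while rtnl is still held */
        unregister_netdevice_many(&list);
}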
+
+static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
+                               struct netlink_ext_ack *extack)
+{
+        u16 mux_id;
+
+        if (!data || !data[IFLA_VLAN_ID])
+                return -EINVAL;
+
+        mux_id = nla_get_u16(data[IFLA_VLAN_ID]);
+        if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
+                return -ERANGE;
+
+        return 0;
+}
+
+static size_t rmnet_get_size(const struct net_device *dev)
+{
+        return nla_total_size(2); /* IFLA_VLAN_ID */
+}
+
+struct rtnl_link_ops rmnet_link_ops __read_mostly = {
+        .kind           = "rmnet",
+        .maxtype        = __IFLA_VLAN_MAX,
+        .priv_size      = sizeof(struct rmnet_priv),
+        .setup          = rmnet_vnd_setup,
+        .validate       = rmnet_rtnl_validate,
+        .newlink        = rmnet_newlink,
+        .dellink        = rmnet_dellink,
+        .get_size       = rmnet_get_size,
+};
+
+struct rmnet_real_dev_info*
+rmnet_get_real_dev_info(struct net_device *real_dev)
+{
+        return __rmnet_get_real_dev_info(real_dev);
+}
+
+/* Startup/Shutdown */
+
+static int __init rmnet_init(void)
+{
+        int rc;
+
+        rc = register_netdevice_notifier(&rmnet_dev_notifier);
+        if (rc != 0)
+                return rc;
+
+        rc = rtnl_link_register(&rmnet_link_ops);
+        if (rc != 0) {
+                unregister_netdevice_notifier(&rmnet_dev_notifier);
+                return rc;
+        }
+        return rc;
+}
+
+static void __exit rmnet_exit(void)
+{
+        unregister_netdevice_notifier(&rmnet_dev_notifier);
+        rtnl_link_unregister(&rmnet_link_ops);
+}
+
+module_init(rmnet_init)
+module_exit(rmnet_exit)
+MODULE_LICENSE("GPL v2");
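
Finally, the netlink contract this link type exposes (a sketch under the same
caveats; example_request_ok() is hypothetical): a request needs IFLA_LINK to
name the real device and, reusing the VLAN attribute space implied by
.maxtype = __IFLA_VLAN_MAX above, IFLA_VLAN_ID to carry the MAP mux id:

static bool example_request_ok(struct nlattr *tb[], struct nlattr *data[])
{
        /* the real (physical) device, identified by ifindex */
        if (!tb[IFLA_LINK])
                return false;

        /* the mux id, bounded exactly as in rmnet_rtnl_validate() */
        if (!data || !data[IFLA_VLAN_ID])
                return false;

        return nla_get_u16(data[IFLA_VLAN_ID]) <= RMNET_MAX_LOGICAL_EP - 1;
}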