author		Kumar Sanghvi	2017-09-21 20:11:14 +0200
committer	David S. Miller	2017-09-23 06:28:01 +0200
commit		62488e4b53ae02d82ac000f91ec82b5cfb41d6f2 (patch)
tree		591ae311ee37cf438b3b64301a412f8c5f59027a /drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
parent		cxgb4: add tc flower offload skeleton (diff)
cxgb4: add basic tc flower offload support
Add support to add/remove flows for offload. The following match and
action types are supported for offloading a flow:

Match:  ether-protocol, IPv4/IPv6 addresses, L4 ports (TCP/UDP)
Action: drop, redirect to another port on the device

The qualifying flows can carry accompanying mask information.

Signed-off-by: Kumar Sanghvi <kumaras@chelsio.com>
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
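The code below leans on two definitions that are not part of this file's diff: the ch_tc_flower_entry bookkeeping struct (from cxgb4_tc_flower.h) and the flower_anymatch_tbl hash table in struct adapter (cxgb4.h). As a rough sketch, with field names inferred from how they are used in this patch and the hash-table order picked arbitrarily:

/* Sketch only: fields inferred from their uses in this patch, not
 * copied from cxgb4_tc_flower.h; the table order (9 bits) is an
 * assumption.
 */
struct ch_tc_flower_entry {
	struct ch_filter_specification fs;	/* filter pushed to hardware */
	unsigned long tc_flower_cookie;		/* TC's identity for the flow */
	struct hlist_node link;			/* flower_anymatch_tbl linkage */
	struct rcu_head rcu;			/* deferred free on destroy */
	u32 filter_id;				/* hardware tid from ctx.tid */
};

/* In struct adapter (cxgb4.h): */
DECLARE_HASHTABLE(flower_anymatch_tbl, 9);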
Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c')
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c	280
1 file changed, 278 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 16dff71e4d02..dda34d5a52fb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -38,16 +38,287 @@
#include "cxgb4.h"
#include "cxgb4_tc_flower.h"
+static struct ch_tc_flower_entry *allocate_flower_entry(void)
+{
+ struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
+ return new;
+}
+
+/* Must be called with either RTNL or rcu_read_lock */
+static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
+ unsigned long flower_cookie)
+{
+ struct ch_tc_flower_entry *flower_entry;
+
+ hash_for_each_possible_rcu(adap->flower_anymatch_tbl, flower_entry,
+ link, flower_cookie)
+ if (flower_entry->tc_flower_cookie == flower_cookie)
+ return flower_entry;
+ return NULL;
+}
+
+static void cxgb4_process_flow_match(struct net_device *dev,
+ struct tc_cls_flower_offload *cls,
+ struct ch_filter_specification *fs)
+{
+ u16 addr_type = 0;
+
+ if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_dissector_key_control *key =
+ skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ cls->key);
+
+ addr_type = key->addr_type;
+ }
+
+ if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key =
+ skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ cls->key);
+ struct flow_dissector_key_basic *mask =
+ skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ cls->mask);
+ u16 ethtype_key = ntohs(key->n_proto);
+ u16 ethtype_mask = ntohs(mask->n_proto);
+
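+ /* ETH_P_ALL is the wildcard ethertype: zero both key and mask
+ * so the filter matches any ether protocol.
+ */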
+ if (ethtype_key == ETH_P_ALL) {
+ ethtype_key = 0;
+ ethtype_mask = 0;
+ }
+
+ fs->val.ethtype = ethtype_key;
+ fs->mask.ethtype = ethtype_mask;
+ fs->val.proto = key->ip_proto;
+ fs->mask.proto = mask->ip_proto;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_dissector_key_ipv4_addrs *key =
+ skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ cls->key);
+ struct flow_dissector_key_ipv4_addrs *mask =
+ skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ cls->mask);
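+ /* fs->type 0 selects an IPv4 filter tuple; 1 (below) selects IPv6 */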
+ fs->type = 0;
+ memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst));
+ memcpy(&fs->val.fip[0], &key->src, sizeof(key->src));
+ memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst));
+ memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src));
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_dissector_key_ipv6_addrs *key =
+ skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ cls->key);
+ struct flow_dissector_key_ipv6_addrs *mask =
+ skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ cls->mask);
+
+ fs->type = 1;
+ memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst));
+ memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src));
+ memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst));
+ memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src));
+ }
+
+ if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_dissector_key_ports *key, *mask;
+
+ key = skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ cls->key);
+ mask = skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ cls->mask);
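+ /* as with the ntohs() conversion of the ethtype above, the
+ * fs->{val,mask} port fields are kept in host byte order, so
+ * convert the dissector's __be16 port values here.
+ */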
+ fs->val.lport = be16_to_cpu(key->dst);
+ fs->mask.lport = be16_to_cpu(mask->dst);
+ fs->val.fport = be16_to_cpu(key->src);
+ fs->mask.fport = be16_to_cpu(mask->src);
+ }
+
+ /* Match only packets coming from the ingress port where this
+ * filter will be created.
+ */
+ fs->val.iport = netdev2pinfo(dev)->port_id;
+ fs->mask.iport = ~0;
+}
+
+static int cxgb4_validate_flow_match(struct net_device *dev,
+ struct tc_cls_flower_offload *cls)
+{
+ if (cls->dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ netdev_warn(dev, "Unsupported key used: 0x%x\n",
+ cls->dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static void cxgb4_process_flow_actions(struct net_device *in,
+ struct tc_cls_flower_offload *cls,
+ struct ch_filter_specification *fs)
+{
+ const struct tc_action *a;
+ LIST_HEAD(actions);
+
+ tcf_exts_to_list(cls->exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ if (is_tcf_gact_shot(a)) {
+ fs->action = FILTER_DROP;
+ } else if (is_tcf_mirred_egress_redirect(a)) {
+ int ifindex = tcf_mirred_ifindex(a);
+ struct net_device *out = __dev_get_by_index(dev_net(in),
+ ifindex);
+ struct port_info *pi = netdev_priv(out);
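+ /* cxgb4_validate_flow_actions() has already verified that
+ * ifindex names one of this adapter's ports, so out cannot
+ * be NULL here.
+ */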
+
+ fs->action = FILTER_SWITCH;
+ fs->eport = pi->port_id;
+ }
+ }
+}
+
+static int cxgb4_validate_flow_actions(struct net_device *dev,
+ struct tc_cls_flower_offload *cls)
+{
+ const struct tc_action *a;
+ LIST_HEAD(actions);
+
+ tcf_exts_to_list(cls->exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ if (is_tcf_gact_shot(a)) {
+ /* Do nothing */
+ } else if (is_tcf_mirred_egress_redirect(a)) {
+ struct adapter *adap = netdev2adap(dev);
+ struct net_device *n_dev;
+ unsigned int i, ifindex;
+ bool found = false;
+
+ ifindex = tcf_mirred_ifindex(a);
+ for_each_port(adap, i) {
+ n_dev = adap->port[i];
+ if (ifindex == n_dev->ifindex) {
+ found = true;
+ break;
+ }
+ }
+
+ /* If interface doesn't belong to our hw, then
+ * the provided output port is not valid
+ */
+ if (!found) {
+ netdev_err(dev, "%s: Out port invalid\n",
+ __func__);
+ return -EINVAL;
+ }
+ } else {
+ netdev_err(dev, "%s: Unsupported action\n", __func__);
+ return -EOPNOTSUPP;
+ }
+ }
+ return 0;
+}
+
int cxgb4_tc_flower_replace(struct net_device *dev,
struct tc_cls_flower_offload *cls)
{
- return -EOPNOTSUPP;
+ struct adapter *adap = netdev2adap(dev);
+ struct ch_tc_flower_entry *ch_flower;
+ struct ch_filter_specification *fs;
+ struct filter_ctx ctx;
+ int fidx;
+ int ret;
+
+ if (cxgb4_validate_flow_actions(dev, cls))
+ return -EOPNOTSUPP;
+
+ if (cxgb4_validate_flow_match(dev, cls))
+ return -EOPNOTSUPP;
+
+ ch_flower = allocate_flower_entry();
+ if (!ch_flower) {
+ netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
+ return -ENOMEM;
+ }
+
+ fs = &ch_flower->fs;
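+ /* enable per-filter hit counters in hardware so flow stats
+ * can be reported once cxgb4_tc_flower_stats() is filled in
+ */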
+ fs->hitcnts = 1;
+ cxgb4_process_flow_actions(dev, cls, fs);
+ cxgb4_process_flow_match(dev, cls, fs);
+
+ fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
+ if (fidx < 0) {
+ netdev_err(dev, "%s: No fidx for offload.\n", __func__);
+ ret = -ENOMEM;
+ goto free_entry;
+ }
+
+ init_completion(&ctx.completion);
+ ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
+ if (ret) {
+ netdev_err(dev, "%s: filter creation err %d\n",
+ __func__, ret);
+ goto free_entry;
+ }
+
+ /* Wait for reply */
+ ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ goto free_entry;
+ }
+
+ ret = ctx.result;
+ /* Check if hw returned error for filter creation */
+ if (ret) {
+ netdev_err(dev, "%s: filter creation err %d\n",
+ __func__, ret);
+ goto free_entry;
+ }
+
+ INIT_HLIST_NODE(&ch_flower->link);
+ ch_flower->tc_flower_cookie = cls->cookie;
+ ch_flower->filter_id = ctx.tid;
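+ /* hash on the TC cookie; destroy/stats find the flow again via
+ * ch_flower_lookup() with the same key
+ */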
+ hash_add_rcu(adap->flower_anymatch_tbl, &ch_flower->link, cls->cookie);
+
+ return ret;
+
+free_entry:
+ kfree(ch_flower);
+ return ret;
}
int cxgb4_tc_flower_destroy(struct net_device *dev,
struct tc_cls_flower_offload *cls)
{
- return -EOPNOTSUPP;
+ struct adapter *adap = netdev2adap(dev);
+ struct ch_tc_flower_entry *ch_flower;
+ int ret;
+
+ ch_flower = ch_flower_lookup(adap, cls->cookie);
+ if (!ch_flower)
+ return -ENOENT;
+
+ ret = cxgb4_del_filter(dev, ch_flower->filter_id);
+ if (ret)
+ goto err;
+
+ hash_del_rcu(&ch_flower->link);
+ kfree_rcu(ch_flower, rcu);
+
+err:
+ return ret;
}
int cxgb4_tc_flower_stats(struct net_device *dev,
@@ -55,3 +326,8 @@ int cxgb4_tc_flower_stats(struct net_device *dev,
{
return -EOPNOTSUPP;
}
+
+void cxgb4_init_tc_flower(struct adapter *adap)
+{
+ hash_init(adap->flower_anymatch_tbl);
+}
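
For context, these entry points are reached through the driver's ndo_setup_tc path added by the parent commit ("cxgb4: add tc flower offload skeleton"). A minimal sketch of that caller-side dispatch, reconstructed from the flower offload API of this kernel generation rather than copied from cxgb4_main.c:

/* Sketch of the caller side; the skeleton commit wires something
 * like this into cxgb4's ndo_setup_tc handling.
 */
static int cxgb4_setup_tc_flower(struct net_device *dev,
				 struct tc_cls_flower_offload *cls_flower)
{
	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return cxgb4_tc_flower_replace(dev, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return cxgb4_tc_flower_destroy(dev, cls_flower);
	case TC_CLSFLOWER_STATS:
		return cxgb4_tc_flower_stats(dev, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}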